# Split to Be Slim: 论文复现

04/24 15:11

Split to Be Slim: An Overlooked Redundancy in Vanilla Convolution 论文复现

## 3、SPConv详解

1. Representative部分执行k*k卷积提取重要信息；
2. Uncertain部分执行1*1卷积补充隐含细节信息。

### 3.3 代码复现

import paddle
import paddle.nn as nn  # NOTE(review): original snippet used `nn` without importing it


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a 3x3 convolution with padding and no bias (ResNet-style helper).

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        stride: convolution stride.
        groups: number of blocked connections (grouped convolution).
        dilation: dilation rate; padding is set equal to dilation so the
            spatial size is preserved at stride 1.

    Returns:
        A configured ``nn.Conv2D`` layer.
    """
    # NOTE(review): the original line was truncated after `stride=stride,`;
    # padding/groups/dilation/bias_attr restored per the standard ResNet helper.
    return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, dilation=dilation,
                     bias_attr=False)
class SPConv_3x3(nn.Layer):
    """SPConv block from "Split to Be Slim" (IJCAI 2020).

    The input channels are split into a "representative" part, processed by a
    3x3 branch (group-wise conv + point-wise conv), and an "uncertain" part,
    processed by a cheap 1x1 conv.  The two branch outputs are fused with
    per-channel weights obtained from global average pooling + softmax.

    Args:
        inplanes: number of input channels.
        outplanes: number of output channels.
        stride: spatial stride (1 or 2).
        ratio: fraction of input channels routed to the 3x3 branch.
    """

    def __init__(self, inplanes=32, outplanes=32, stride=1, ratio=0.5):
        super(SPConv_3x3, self).__init__()
        self.inplanes_3x3 = int(inplanes * ratio)
        self.inplanes_1x1 = inplanes - self.inplanes_3x3
        self.outplanes_3x3 = int(outplanes * ratio)
        self.outplanes_1x1 = outplanes - self.outplanes_3x3
        self.outplanes = outplanes
        self.stride = stride
        self.ratio = ratio
        # Paper uses 1/ratio groups for the group-wise conv (ratio=0.5 -> 2).
        self.groups = int(1 / self.ratio)
        # Representative (3x3) branch: group-wise 3x3 + point-wise 1x1.
        # NOTE(review): original gwc line was truncated; padding=1 and
        # groups=1/ratio restored per the reference implementation.
        self.gwc = nn.Conv2D(self.inplanes_3x3, self.outplanes, kernel_size=3,
                             stride=self.stride, padding=1, groups=self.groups)
        self.pwc = nn.Conv2D(self.inplanes_3x3, self.outplanes, kernel_size=1)
        # Uncertain (1x1) branch.
        self.conv1x1 = nn.Conv2D(self.inplanes_1x1, self.outplanes, kernel_size=1)
        # Stride-2 down-sampling pools (applied before the 1x1 convs).
        self.avgpool_s2_1 = nn.AvgPool2D(kernel_size=2, stride=2)
        self.avgpool_s2_3 = nn.AvgPool2D(kernel_size=2, stride=2)
        # Global average pooling used to derive the per-channel fusion weights.
        # NOTE(review): these two layers were missing from the extracted
        # snippet but are required by forward(); restored from the paper repo.
        self.avgpool_add_3 = nn.AdaptiveAvgPool2D(1)
        self.avgpool_add_1 = nn.AdaptiveAvgPool2D(1)
        self.bn1 = nn.BatchNorm2D(self.outplanes)
        self.bn2 = nn.BatchNorm2D(self.outplanes)
        self.softmax = nn.Softmax(axis=2)

    def forward(self, x):
        b, c, _, _ = x.shape
        # Channel split: first ratio*c channels -> 3x3 branch, rest -> 1x1.
        x_3x3 = x[:, :int(c * self.ratio), :, :]
        x_1x1 = x[:, int(c * self.ratio):, :, :]

        # Representative branch.
        out_3x3_gwc = self.gwc(x_3x3)
        if self.stride == 2:
            # Down-sample so the point-wise output matches the strided gwc.
            x_3x3 = self.avgpool_s2_3(x_3x3)
        out_3x3_pwc = self.pwc(x_3x3)
        out_3x3 = self.bn1(out_3x3_gwc + out_3x3_pwc)

        # Uncertain branch; avg-pool first to reduce information loss.
        if self.stride == 2:
            x_1x1 = self.avgpool_s2_1(x_1x1)
        out_1x1 = self.bn2(self.conv1x1(x_1x1))

        # Fusion weights: global-average-pool each branch, stack to
        # [b, outplanes, 2], then softmax over the branch axis.
        # NOTE(review): the original snippet used `out_31_ratio` without ever
        # computing it (NameError); these lines restore the missing step.
        out_31 = self.avgpool_add_3(out_3x3)
        out_1 = self.avgpool_add_1(out_1x1)
        out_31_ratio = paddle.stack([out_31, out_1], axis=2).reshape(
            [b, self.outplanes, 2])
        out_31_ratio = self.softmax(out_31_ratio)

        # Weighted sum of the two branches, channel by channel.
        out = out_1x1 * (out_31_ratio[:, :, 1].reshape(
            [b, self.outplanes, 1, 1]).expand_as(out_1x1)) \
            + out_3x3 * (out_31_ratio[:, :, 0].reshape(
                [b, self.outplanes, 1, 1]).expand_as(out_3x3))
        return out
# Quick sanity check: push a random batch through a default SPConv_3x3 block
# and confirm the output shape (stride 1 keeps the 224x224 spatial size).
layer = SPConv_3x3()
sample = paddle.randn([1, 32, 224, 224])
out = layer(sample)
print(out.shape)
W0724 22:30:03.841145 13041 gpu_resources.cc:61] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 10.1
W0724 22:30:03.845882 13041 gpu_resources.cc:91] device: 0, cuDNN Version: 7.6.
[1, 32, 224, 224]
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:654: UserWarning: When training, we now always track global mean and variance.
"When training, we now always track global mean and variance.")

## 4、消融实验

# Train the SPConv variant of ResNet-18 with Paddle's high-level Model API.
# NOTE(review): this snippet appears truncated by extraction — the opening
# lines of the two dataset constructors (which end with "transform=transform)"),
# the optimizer ("optim"), the callback, the "res_sp" model wrapper, the loss
# passed to prepare(), and the ".fit(" call that the trailing "epochs=10, ..."
# arguments belong to are all missing; restore them from the original notebook.
import paddle
from paddle.vision.transforms import Compose, Normalize, Resize, Transpose, ToTensor
from sp_resnet import resnet18_sp
normalize = Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
data_format='HWC')
# NOTE(review): 'normalize' defined above is never used — Compose builds a
# fresh Normalize() with default parameters; presumably 'normalize' was
# intended here. Verify against the original notebook.
transform = Compose([ToTensor(), Normalize(), Resize(size=(224,224))])
transform=transform)
transform=transform)
# Build the training-set data loader
# Build the test-set data loader
res_sp.prepare(
optim,
Accuracy()
)
epochs=10,
callbacks=callback,
verbose=1
)
# Ablation baseline: train the original ResNet-18 with the identical pipeline
# (same transforms, optimizer settings, and epochs) for comparison.
# NOTE(review): like the SPConv run above, this snippet is truncated — the
# dataset constructor openings, "optim", "callback", the "res_18" model
# wrapper, the loss, and the ".fit(" call are missing; restore them from the
# original notebook.
from paddle.vision.transforms import Compose, Normalize, Resize, Transpose, ToTensor
normalize = Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
data_format='HWC')
# NOTE(review): 'normalize' is unused; Compose uses a default Normalize() —
# likely a bug carried over from the first training cell.
transform = Compose([ToTensor(), Normalize(), Resize(size=(224,224))])
transform=transform)
transform=transform)
# Build the training-set data loader
# Build the test-set data loader
res_18.prepare(
optim,
Accuracy()
)
epochs=10,
callbacks=callback,
verbose=1
)

## 5、实验结果分析

• 添加了SPConv模块的ResNet18效果反而不如原始的ResNet18

## 7、参考资料

Split to Be Slim: An Overlooked Redundancy in Vanilla Convolution

0 评论
0 收藏
0