卷积神经网络的演化(4)-- 轻量化

MobileNet是为移动端和嵌入式端深度学习应用而设计的网络,使得在cpu上也能达到理想的速度要求。

MobileNet v1

MobileNet v1的结构非常简单,是一个复古的直筒结构,类似于VGG一样。除了第一层采用标准的卷积层之外,其他的层都为深度可分离卷积。

import torch
import torch.nn as nn

#========================================================
# 网络框架
#========================================================

class MobileNet_v1(nn.Module):
    """MobileNet v1: a plain VGG-like "straight pipe" network in which every
    layer after the stem is a depthwise-separable convolution
    (depthwise 3x3 followed by pointwise 1x1).

    Args:
        num_classes: size of the final classification layer (default 1000,
            matching the original ImageNet head).
    """

    def __init__(self, num_classes=1000):
        super(MobileNet_v1, self).__init__()

        def conv_bn(inp, oup, stride):
            # Standard convolution: Conv3x3 + BN + ReLU (used only for the stem).
            return nn.Sequential(
                nn.Conv2d(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        def conv_dw(inp, oup, stride):
            # Depthwise-separable convolution.
            return nn.Sequential(
                # depthwise convolution: one 3x3 filter per input channel
                nn.Conv2d(inp, inp, kernel_size=3, stride=stride, padding=1,
                          groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),

                # pointwise convolution: 1x1 conv mixing channels
                nn.Conv2d(inp, oup, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        self.features = nn.Sequential(
            conv_bn(3, 32, 2),
            conv_dw(32, 64, 1),
            conv_dw(64, 128, 2),
            conv_dw(128, 128, 1),
            conv_dw(128, 256, 2),
            conv_dw(256, 256, 1),
            conv_dw(256, 512, 2),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 1024, 2),
            conv_dw(1024, 1024, 1),
            # expects 224x224 input, which yields a 7x7 map here
            nn.AvgPool2d(7)
        )
        self.classifier = nn.Linear(1024, num_classes)

    def forward(self, x):
        x = self.features(x)
        # Flatten per sample; x.size(0) keeps the batch dimension explicit
        # (safer than hard-coding view(-1, 1024)).
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

#========================================================
# 主程序
#========================================================

# Demo: build the model and run one dummy forward pass.
model = MobileNet_v1()
print(model)

# torch.autograd.Variable is deprecated since PyTorch 0.4:
# plain tensors carry autograd state directly.
input_tensor = torch.randn((1, 3, 224, 224))
out = model(input_tensor)

MobileNet v2

MobileNet v2是对MobileNet v1的改进,同样是一个轻量级卷积神经网络,使用了线性瓶颈层。

import torch
import torch.nn as nn

#========================================================
# 瓶颈结构
#========================================================

class Bottleneck(nn.Module):
    """Inverted-residual bottleneck stage of MobileNet v2.

    ``conv1`` is the entry block (possibly strided) that maps ``in_planes``
    to ``out_planes``; ``conv2`` is a stride-1 residual block applied
    ``repeat_times - 1`` more times with an additive skip connection.

    NOTE(review): all repeated applications share conv2's weights, whereas
    the MobileNet v2 paper uses independent weights per repeat — kept as-is
    to preserve this module's structure.

    Args:
        in_planes:    number of input channels.
        expansion:    expansion factor t of the 1x1 expansion conv.
        out_planes:   number of output channels.
        repeat_times: total number of bottleneck applications (n in the paper).
        stride:       stride of the first (entry) block.
    """

    def __init__(self, in_planes, expansion, out_planes, repeat_times, stride):
        super(Bottleneck, self).__init__()
        inner_channels = in_planes * expansion
        expanded = out_planes * expansion

        # Entry block: 1x1 expand -> 3x3 depthwise (stride s) -> 1x1 linear project.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_planes, inner_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(inner_channels),
            nn.ReLU(inplace=True),

            nn.Conv2d(inner_channels, inner_channels, kernel_size=3, stride=stride,
                      padding=1, groups=inner_channels, bias=False),
            nn.BatchNorm2d(inner_channels),
            nn.ReLU(inplace=True),

            # linear bottleneck: no activation after the projection
            nn.Conv2d(inner_channels, out_planes, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_planes)
        )

        # Residual block used for the remaining repeats (stride 1, shape-preserving).
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_planes, expanded, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(expanded),
            nn.ReLU(inplace=True),

            # BUGFIX: a depthwise conv requires groups == number of channels;
            # the original used groups=out_planes on an (out_planes*expansion)-
            # channel tensor, which is a grouped conv, not depthwise.
            nn.Conv2d(expanded, expanded, kernel_size=3, stride=1,
                      padding=1, groups=expanded, bias=False),
            nn.BatchNorm2d(expanded),
            nn.ReLU(inplace=True),

            nn.Conv2d(expanded, out_planes, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_planes)
        )
        self.n = repeat_times

    def forward(self, x):
        out = self.conv1(x)
        # Apply the (weight-shared) residual block repeat_times - 1 times.
        for _ in range(self.n - 1):
            out = out + self.conv2(out)
        return out

#========================================================
# 网络框架
#========================================================

class MobileNet_v2(nn.Module):
    """MobileNet v2: a stem convolution, a stack of inverted-residual
    Bottleneck stages, a 1x1 expansion to 1280 channels with 7x7 average
    pooling, and a 1x1 convolutional classifier."""

    def __init__(self):
        super(MobileNet_v2, self).__init__()

        # Stem: standard 3x3 convolution, stride 2.
        stem = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        # Head: 1x1 expansion to 1280 channels, then global 7x7 average pooling.
        final = nn.Sequential(
            nn.Conv2d(320, 1280, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(1280),
            nn.ReLU(inplace=True),

            nn.AvgPool2d(kernel_size=7)
        )

        # Stage configuration: (in_planes, expansion t, out_planes, repeats n, stride s).
        stage_cfg = [
            (32, 1, 16, 1, 1),
            (16, 6, 24, 2, 2),
            (24, 6, 32, 3, 2),
            (32, 6, 64, 4, 2),
            (64, 6, 96, 3, 1),
            (96, 6, 160, 3, 2),
            (160, 6, 320, 1, 1),
        ]
        stages = [Bottleneck(*params) for params in stage_cfg]

        self.features = nn.Sequential(stem, *stages, final)
        self.classifier = nn.Conv2d(1280, 1000, kernel_size=1, stride=1, bias=False)

    def forward(self, x):
        feats = self.features(x)
        logits = self.classifier(feats)
        return logits.view(-1, 1000)

#========================================================
# 主程序
#========================================================

# Demo: build the model and run one dummy forward pass.
model = MobileNet_v2()
print(model)

# torch.autograd.Variable is deprecated since PyTorch 0.4:
# plain tensors carry autograd state directly.
input_tensor = torch.randn((1, 3, 224, 224))
out = model(input_tensor)