The Evolution of Convolutional Neural Networks (3): Enhancing the Convolution Module

In 2014, NiNNet matched AlexNet's accuracy with far fewer parameters, opening up the exploration of convolution-module design. Inspired by it, GoogLeNet introduced a building block that approximates a sparse network structure with dense, readily available components, improving accuracy while keeping the use of computational resources efficient. GoogLeNet and its successors showed that fusing information extracted from an image at different scales produces a better representation.

NiNNet

NiNNet comes from the 2014 National University of Singapore paper "Network In Network", which proposed replacing the simple linear convolution layers of earlier CNNs with small multilayer perceptrons (in practice, 1x1 convolutions stacked after an ordinary convolution). The design was later borrowed by networks such as ResNet and Inception.
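
The basic unit of NiN is the "mlpconv" layer: an ordinary convolution followed by 1x1 convolutions, which amount to a tiny MLP applied across channels at every spatial position. A minimal sketch of such a block (the layer sizes are illustrative, matching the first stage of the network below):

import torch
from torch import nn

def mlpconv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    # An ordinary convolution followed by two 1x1 convolutions; the 1x1 layers
    # mix channels at each pixel, acting as a small per-position MLP.
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(inplace=True),
    )

block = mlpconv(3, 96, kernel_size=11, stride=2, padding=4)
print(block(torch.randn(1, 3, 224, 224)).shape)   # torch.Size([1, 96, 111, 111])

The full network stacks several such blocks with max pooling in between and, instead of fully connected layers, ends with a 1x1 convolution down to the number of classes followed by global average pooling: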

import torch
from torch import nn

class NiNNet(nn.Module):
    '''
    NiNNet
    INPUT -> image of shape (224, 224, 3), number of classes: 1000
    '''
    def __init__(self):
        super(NiNNet, self).__init__()
        self.part1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=4, bias=False),
            # output size: floor((224 + 2*4 - 11) / 2) + 1 = 111
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 111
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 111
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
            # after pooling: floor((111 - 3) / 2) + 1 = 55
        )
        self.part2 = nn.Sequential(
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2, bias=False),
            # output size: (55 + 2*2 - 5) / 1 + 1 = 55
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 55
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 55
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
            # after pooling: floor((55 - 3) / 2) + 1 = 27
        )
        self.part3 = nn.Sequential(
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1, bias=False),
            # output size: (27 + 2*1 - 3) / 1 + 1 = 27
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 27
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 27
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
            # after pooling: floor((27 - 3) / 2) + 1 = 13
        )
        self.part4 = nn.Sequential(
            nn.Conv2d(384, 1024, kernel_size=3, stride=1, padding=1, bias=False),
            # output size: (13 + 2*1 - 3) / 1 + 1 = 13
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 13
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 13
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
            # after pooling: floor((13 - 3) / 2) + 1 = 6
        )
        self.part5 = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1, bias=False),
            # output size: (6 + 2*1 - 3) / 1 + 1 = 6
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution, spatial size unchanged: 6
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1000, kernel_size=1, stride=1, bias=False),
            # 1x1 convolution mapping to the 1000 class channels, spatial size: 6
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=6, stride=1)
            # global average pooling: (6 - 6) / 1 + 1 = 1
        )

    def forward(self, x):
        x = self.part1(x)
        x = self.part2(x)
        x = self.part3(x)
        x = self.part4(x)
        x = self.part5(x)
        logits = x.view(x.size(0), 1000)
        probas = torch.softmax(logits, dim=1)
        return logits, probas

model = NiNNet()
print(model)

input_tensor = torch.randn((1, 3, 224, 224))
out = model(input_tensor)  # torch.autograd.Variable is deprecated; tensors can be passed directly
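
As a quick check of the size comments above, a minimal sketch that feeds a dummy batch through each stage and prints the intermediate shapes:

x = torch.randn(1, 3, 224, 224)
for name in ['part1', 'part2', 'part3', 'part4', 'part5']:
    x = getattr(model, name)(x)
    print(name, tuple(x.shape))
# Expected: (1, 96, 55, 55), (1, 256, 27, 27), (1, 384, 13, 13),
#           (1, 1024, 6, 6), (1, 1000, 1, 1)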

GoogLeNet and Inception V1

In 2014, the first-place entry on ImageNet used a network structure called GoogLeNet. It introduced a new building block for deep networks, now known as the "Inception" module.
The Inception module acts as a multi-level feature extractor: within a single module it computes 1x1, 3x3 and 5x5 convolutions side by side, and the outputs of these filters are concatenated along the channel dimension before being fed to the next layer of the network.

On top of this idea, Inception V1 (the version used in GoogLeNet) reduces the cost of the expensive 5x5 convolutions by adding a 1x1 convolution before the 3x3 convolution, before the 5x5 convolution, and after the 3x3 max pooling, which lowers the total number of network parameters.
The main effect of a 1x1 convolution is to reduce the number of channels of the input feature map. Given a 6x6x128 feature map, 32 kernels of size 1x1 (each spanning all 128 input channels) produce a 6x6x32 output; whenever the number of 1x1 kernels is smaller than the number of input channels, the layer performs dimensionality reduction, as the sketch below illustrates.
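
A minimal sketch (channel counts picked only for illustration) showing both the channel reduction itself and the parameter savings it buys for a following 5x5 convolution:

import torch
from torch import nn

x = torch.randn(1, 128, 6, 6)                # a 6x6 feature map with 128 channels

conv1x1 = nn.Conv2d(128, 32, kernel_size=1)  # 32 kernels, each of size 1x1x128
print(conv1x1(x).shape)                      # torch.Size([1, 32, 6, 6])

def count(m):
    return sum(p.numel() for p in m.parameters())

# A 5x5 convolution applied directly vs. after a 1x1 channel reduction
direct = nn.Conv2d(128, 64, kernel_size=5, padding=2)
reduced = nn.Sequential(nn.Conv2d(128, 32, kernel_size=1),
                        nn.Conv2d(32, 64, kernel_size=5, padding=2))
print(count(direct), count(reduced))         # 204864 vs. 55392

The InceptionBlock below uses exactly this trick in front of its 3x3 and 5x5 branches and after its pooling branch: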

import torch
from torch import nn

class InceptionBlock(nn.Module):
    '''
    Multi-level feature extractor with 1x1 dimensionality reduction
    '''
    def __init__(self, input_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool):
        super(InceptionBlock, self).__init__()
        # 1x1 convolution branch
        self.branch1 = nn.Sequential(
            nn.Conv2d(input_planes, n1x1, kernel_size=1, bias=False),
            nn.LocalResponseNorm(n1x1),
            nn.ReLU(inplace=True)
        )
        # 3x3 convolution branch
        self.branch2 = nn.Sequential(
            nn.Conv2d(input_planes, n3x3red, kernel_size=1, bias=False),  # channel reduction
            nn.LocalResponseNorm(n3x3red),
            nn.ReLU(inplace=True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1, bias=False),
            nn.LocalResponseNorm(n3x3),
            nn.ReLU(inplace=True)
        )
        # 5x5 convolution branch
        self.branch3 = nn.Sequential(
            nn.Conv2d(input_planes, n5x5red, kernel_size=1, bias=False),  # channel reduction
            nn.LocalResponseNorm(n5x5red),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=5, padding=2, bias=False),
            nn.LocalResponseNorm(n5x5),
            nn.ReLU(inplace=True)
        )
        # pooling branch
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),  # padding keeps the spatial size
            nn.Conv2d(input_planes, pool, kernel_size=1, bias=False),
            nn.LocalResponseNorm(pool),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        x4 = self.branch4(x)
        x = torch.cat([x1, x2, x3, x4], 1)
        return x

class InceptionV1(nn.Module):
    '''
    InceptionV1
    INPUT -> image of shape (224, 224, 3), number of classes: 1000
    '''
    def __init__(self):
        super(InceptionV1, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            # output size: floor((224 + 2*3 - 7) / 2) + 1 = 112
            nn.LocalResponseNorm(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # after pooling: floor((112 + 2*1 - 3) / 2) + 1 = 56
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1, bias=False),
            # output size: (56 + 2*1 - 3) / 1 + 1 = 56
            nn.LocalResponseNorm(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # after pooling: floor((56 + 2*1 - 3) / 2) + 1 = 28
        )
        self.inception_3 = nn.Sequential(
            InceptionBlock(192, 64, 96, 128, 16, 32, 32),
            # Inception_3a keeps the spatial size, channels = 64+128+32+32 = 256
            InceptionBlock(256, 128, 128, 192, 32, 96, 64),
            # Inception_3b keeps the spatial size, channels = 128+192+96+64 = 480
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # after pooling: floor((28 + 2*1 - 3) / 2) + 1 = 14
        )
        self.inception_4 = nn.Sequential(
            InceptionBlock(480, 192, 96, 208, 16, 48, 64),
            # Inception_4a keeps the spatial size, channels = 192+208+48+64 = 512
            InceptionBlock(512, 160, 112, 224, 24, 64, 64),
            # Inception_4b keeps the spatial size, channels = 160+224+64+64 = 512
            InceptionBlock(512, 128, 128, 256, 24, 64, 64),
            # Inception_4c keeps the spatial size, channels = 128+256+64+64 = 512
            InceptionBlock(512, 112, 144, 288, 32, 64, 64),
            # Inception_4d keeps the spatial size, channels = 112+288+64+64 = 528
            InceptionBlock(528, 256, 160, 320, 32, 128, 128),
            # Inception_4e keeps the spatial size, channels = 256+320+128+128 = 832
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # after pooling: floor((14 + 2*1 - 3) / 2) + 1 = 7
        )
        self.inception_5 = nn.Sequential(
            InceptionBlock(832, 256, 160, 320, 32, 128, 128),
            # Inception_5a keeps the spatial size, channels = 256+320+128+128 = 832
            InceptionBlock(832, 384, 192, 384, 48, 128, 128),
            # Inception_5b keeps the spatial size, channels = 384+384+128+128 = 1024
            nn.AvgPool2d(kernel_size=7, stride=1)
            # global average pooling: (7 - 7) / 1 + 1 = 1
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.4),
            nn.Linear(1024*1*1, 1000)
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.inception_3(x)
        x = self.inception_4(x)
        x = self.inception_5(x)
        x = x.view(x.size(0), 1024*1*1)
        x = self.classifier(x)
        return x

model = InceptionV1()
print(model)

input_tensor = torch.randn((1, 3, 224, 224))
out = model(input_tensor)  # torch.autograd.Variable is deprecated; tensors can be passed directly

Inception V2 and V3

In 2015, a single paper introduced both Inception V2 and Inception V3.
Compared with Inception V1, Inception V2 makes the following improvements:
It uses Batch Normalization in place of Dropout and LRN; its regularizing effect makes training large convolutional networks many times faster and also noticeably raises the final classification accuracy.
Following VGGNet, it replaces each 5x5 convolution with two stacked 3x3 convolutions, lowering the parameter count while strengthening the network's capacity to learn.
Compared with Inception V2, Inception V3 makes the following improvements:
It factorizes convolutions: a 7x7 convolution is decomposed into two one-dimensional convolutions applied in series (1x7 then 7x1), and a 3x3 convolution into 1x3 then 3x1. This speeds up computation, lets the network grow deeper, and adds nonlinearity (each added layer is followed by a ReLU); the sketch after this list compares the parameter savings of both replacements.
In addition, the network input changes from 224x224 to 299x299.
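
As a quick illustration, here is a minimal sketch (with 192 channels chosen arbitrarily; the layer names are only for the example) comparing the parameter counts of the original convolutions against their replacements:

import torch
from torch import nn

C = 192

def count(m):
    return sum(p.numel() for p in m.parameters())

# Inception V2: two stacked 3x3 convolutions instead of one 5x5
conv5x5 = nn.Conv2d(C, C, kernel_size=5, padding=2, bias=False)
two_3x3 = nn.Sequential(nn.Conv2d(C, C, kernel_size=3, padding=1, bias=False),
                        nn.Conv2d(C, C, kernel_size=3, padding=1, bias=False))
print(count(conv5x5), count(two_3x3))        # 921600 vs. 663552

# Inception V3: a 7x7 convolution factorized into 1x7 followed by 7x1
conv7x7 = nn.Conv2d(C, C, kernel_size=7, padding=3, bias=False)
factorized = nn.Sequential(nn.Conv2d(C, C, kernel_size=(1, 7), padding=(0, 3), bias=False),
                           nn.Conv2d(C, C, kernel_size=(7, 1), padding=(3, 0), bias=False))
print(count(conv7x7), count(factorized))     # 1806336 vs. 516096

# All four keep the spatial size, so they are drop-in replacements
x = torch.randn(1, C, 17, 17)
print(conv5x5(x).shape, two_3x3(x).shape, conv7x7(x).shape, factorized(x).shape)

The Inception V3 implementation below applies this factorization in its middle blocks (kernel sizes (1, 7) and (7, 1)).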

import torch
import torch.nn as nn

#========================================================
# Diverse multi-branch feature-extraction blocks
#========================================================

class InceptionBlockA(nn.Module):
    def __init__(self, in_planes, b1_1, b2t_1, b2t_2, b3t_1, b3t_2, b3t_3, b4_1):
        super(InceptionBlockA, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, 64, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, 48, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU(inplace=True),
            nn.Conv2d(48, 64, kernel_size=5, stride=1, padding=2, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, 64, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True)
        )
        self.branch4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_planes, b4_1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(b4_1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        x4 = self.branch4(x)
        return torch.cat([x1, x2, x3, x4], dim=1)

class InceptionBlockB(nn.Module):
    def __init__(self, in_planes, b1_1, b3t_1, b3t_2, b3t_3):
        super(InceptionBlockB, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, 384, kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, 64, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        return torch.cat([x1, x2, x3], dim=1)

class InceptionBlockC(nn.Module):
    def __init__(self, in_planes, b1_1, b2t_1, b2t_2, b2t_3, b3t_1, b3t_2, b3t_3, b3t_4, b3t_5, b4_1):
        super(InceptionBlockC, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, 192, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, b2t_1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(b2t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b2t_1, b2t_1, kernel_size=(1, 7), stride=1, padding=(0, 3), bias=False),
            nn.BatchNorm2d(b2t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b2t_1, 192, kernel_size=(7, 1), stride=1, padding=(3, 0), bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, b3t_1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(b3t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b3t_1, b3t_1, kernel_size=(7, 1), stride=1, padding=(3, 0), bias=False),
            nn.BatchNorm2d(b3t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b3t_1, b3t_1, kernel_size=(1, 7), stride=1, padding=(0, 3), bias=False),
            nn.BatchNorm2d(b3t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b3t_1, b3t_1, kernel_size=(7, 1), stride=1, padding=(3, 0), bias=False),
            nn.BatchNorm2d(b3t_1),
            nn.ReLU(inplace=True),
            nn.Conv2d(b3t_1, 192, kernel_size=(1, 7), stride=1, padding=(0, 3), bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        self.branch4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_planes, b4_1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(b4_1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        x4 = self.branch4(x)
        return torch.cat([x1, x2, x3, x4], dim=1)

class InceptionBlockD(nn.Module):
    def __init__(self, in_planes, b1_1, b1_2, b3t_1, b3t_2, b3t_3, b3t_4):
        super(InceptionBlockD, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, 192, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 320, kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(320),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, 192, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=(1, 7), stride=1, padding=(0, 3), bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0), bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        return torch.cat([x1, x2, x3], dim=1)

class InceptionBlockE(nn.Module):
    def __init__(self, in_planes):
        super(InceptionBlockE, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, 320, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(320),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, 384, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch2_1 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=(1, 3), stride=1, padding=(0, 1), bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch2_2 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, 448, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(448),
            nn.ReLU(inplace=True),
            nn.Conv2d(448, 384, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch3_1 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=(1, 3), stride=1, padding=(0, 1), bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch3_2 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True)
        )
        self.branch4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_planes, 192, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x2_1 = self.branch2_1(x2)
        x2_2 = self.branch2_2(x2)
        x3 = self.branch3(x)
        x3_1 = self.branch3_1(x3)
        x3_2 = self.branch3_2(x3)
        x4 = self.branch4(x)
        return torch.cat([x1, x2_1, x2_2, x3_1, x3_2, x4], dim=1)

#========================================================
# Network skeleton
#========================================================

class InceptionV3(nn.Module):
    '''
    InceptionV3
    INPUT -> image of shape (299, 299, 3), number of classes: 1000
    '''
    def __init__(self):
        super(InceptionV3, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, stride=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.conv4 = nn.Sequential(
            nn.Conv2d(64, 80, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(80),
            nn.ReLU(inplace=True)
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(80, 192, kernel_size=3, stride=1, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        self.pool6 = nn.MaxPool2d(kernel_size=3, stride=2)
        # feature map after the stem: 35x35x192

        self.inceptionA1 = InceptionBlockA(192, 64, 48, 64, 64, 96, 96, 32)
        self.inceptionA2 = InceptionBlockA(256, 64, 48, 64, 64, 96, 96, 64)
        self.inceptionA3 = InceptionBlockA(288, 64, 48, 64, 64, 96, 96, 64)

        self.inceptionB = InceptionBlockB(288, 384, 64, 96, 96)   # downsamples to 17x17

        self.inceptionC1 = InceptionBlockC(768, 192, 128, 128, 192, 128, 128, 128, 128, 192, 192)
        self.inceptionC2 = InceptionBlockC(768, 192, 160, 160, 192, 160, 160, 160, 160, 192, 192)
        self.inceptionC3 = InceptionBlockC(768, 192, 160, 160, 192, 160, 160, 160, 160, 192, 192)
        self.inceptionC4 = InceptionBlockC(768, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192)

        self.inceptionD = InceptionBlockD(768, 192, 320, 192, 192, 192, 192)  # downsamples to 8x8
        self.inceptionE1 = InceptionBlockE(1280)
        self.inceptionE2 = InceptionBlockE(2048)

        self.global_pool = nn.AvgPool2d(kernel_size=8, stride=1)
        self.classifier = nn.Linear(2048, 1000)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool6(x)
        x = self.inceptionA1(x)
        x = self.inceptionA2(x)
        x = self.inceptionA3(x)
        x = self.inceptionB(x)
        x = self.inceptionC1(x)
        x = self.inceptionC2(x)
        x = self.inceptionC3(x)
        x = self.inceptionC4(x)
        x = self.inceptionD(x)
        x = self.inceptionE1(x)
        x = self.inceptionE2(x)
        x = self.global_pool(x)
        x = x.view(x.size(0), 2048)
        x = self.classifier(x)
        return x

#========================================================
# Main program
#========================================================

model = InceptionV3()
print(model)

input_tensor = torch.randn((1, 3, 299, 299))
out = model(input_tensor)  # torch.autograd.Variable is deprecated; tensors can be passed directly