-
Notifications
You must be signed in to change notification settings - Fork 814
/
vgg.py
267 lines (230 loc) · 9.76 KB
/
vgg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
VGG for ImageNet.
Introduction
----------------
VGG is a convolutional neural network model proposed by K. Simonyan and A. Zisserman
from the University of Oxford in the paper "Very Deep Convolutional Networks for
Large-Scale Image Recognition" . The model achieves 92.7% top-5 test accuracy in ImageNet,
which is a dataset of over 14 million images belonging to 1000 classes.
Download Pre-trained Model
----------------------------
- Model weights in this example - vgg16_weights.npz : http://www.cs.toronto.edu/~frossard/post/vgg16/
- Model weights in this example - vgg19.npy : https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/
- Caffe VGG 16 model : https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
- Tool to convert the Caffe models to TensorFlow's : https://github.com/ethereon/caffe-tensorflow
Note
------
- For simplified CNN layer see "Convolutional layer (Simplified)"
in read the docs website.
- When feeding other images to the model be sure to properly resize or crop them
beforehand. Distorted images might end up being misclassified. One way of safely
feeding images of multiple sizes is by doing center cropping.
"""
import os
import numpy as np
import tensorlayerx as tlx
from tensorlayerx import logging
from tensorlayerx.files import assign_weights, maybe_download_and_extract
from tensorlayerx.nn import (BatchNorm, Conv2d, Linear, Flatten, Input, Sequential, MaxPool2d)
from tensorlayerx.nn import Module
# Public API of this module; the commented-out names are batch-norm and
# shallower VGG variants that are mapped in ``mapped_cfg`` but not exported yet.
__all__ = [
    'VGG',
    'vgg16',
    'vgg19',
    'VGG16',
    'VGG19',
    # 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    # 'vgg19_bn', 'vgg19',
]
# Per-slot layer names, indexed in lockstep with the entries of each ``cfg``
# table below: conv groups get a sub-list of names (one per conv layer),
# every other slot gets a single string.
layer_names = [
    ['conv1_1', 'conv1_2'], 'pool1',
    ['conv2_1', 'conv2_2'], 'pool2',
    ['conv3_1', 'conv3_2', 'conv3_3', 'conv3_4'], 'pool3',
    ['conv4_1', 'conv4_2', 'conv4_3', 'conv4_4'], 'pool4',
    ['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], 'pool5',
    'flatten', 'fc1_relu', 'fc2_relu', 'outputs'
]

# Architecture tables: lists are conv groups (values are out_channels),
# 'M' = max-pool, 'F' = flatten, 'fc1'/'fc2' = hidden FC layers, 'O' = logits.
cfg = {
    'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
    'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
    'D': [
        [64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M',
        [512, 512, 512], 'M', 'F', 'fc1', 'fc2', 'O'
    ],
    'E': [
        [64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M',
        [512, 512, 512, 512], 'M', 'F', 'fc1', 'fc2', 'O'
    ],
}

# Map a model name (with or without the '_bn' suffix) to its cfg letter.
mapped_cfg = {}
for _depth, _letter in (('11', 'A'), ('13', 'B'), ('16', 'D'), ('19', 'E')):
    mapped_cfg['vgg' + _depth] = _letter
    mapped_cfg['vgg' + _depth + '_bn'] = _letter

# Download locations and on-disk file names of the pretrained weight files.
model_urls = {
    'vgg16': 'https://git.openi.org.cn/attachments/760835b9-db71-4a00-8edd-d5ece4b6b522?type=0',
    'vgg19': 'https://git.openi.org.cn/attachments/503c8a6c-705f-4fb6-ba18-03d72b6a949a?type=0'
}
model_saved_name = {'vgg16': 'vgg16_weights.npz', 'vgg19': 'vgg19.npy'}
class VGG(Module):
    """VGG network built from one of the ``cfg`` architecture tables.

    Parameters
    ----------
    layer_type : str
        Key into ``mapped_cfg`` (e.g. 'vgg16', 'vgg19'); selects which
        architecture table in ``cfg`` is used to build the layers.
    batch_norm : bool
        If True, a BatchNorm layer is inserted after every conv layer.
    end_with : str
        Name of the last layer to build (see ``layer_names``). The default
        'outputs' builds the whole network.
    name : None or str
        Module name forwarded to the ``Module`` base class.
    """

    def __init__(self, layer_type, batch_norm=False, end_with='outputs', name=None):
        super(VGG, self).__init__(name=name)
        self.end_with = end_with
        # Resolve e.g. 'vgg16' -> cfg['D'], then build the layer stack.
        config = cfg[mapped_cfg[layer_type]]
        self.make_layer = make_layers(config, batch_norm, end_with)

    def forward(self, inputs):
        """
        inputs : tensor
            Shape [None, 3, 224, 224], value range [0, 1]. Channels-first:
            the layers are built with data_format='channels_first' and the
            mean below is reshaped to (3, 1, 1) for channels-first broadcast.
        """
        # Rescale [0, 1] -> [0, 255] and subtract the per-channel ImageNet mean.
        # inputs = inputs * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3])
        inputs = inputs * 255. - tlx.convert_to_tensor(np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape(-1,1,1))
        out = self.make_layer(inputs)
        return out
def make_layers(config, batch_norm=False, end_with='outputs'):
    """Assemble a VGG layer stack as a ``Sequential`` module.

    Parameters
    ----------
    config : list
        One of the architecture tables in ``cfg``: lists are conv groups
        (values are out_channels), 'M' = max-pool, 'F' = flatten,
        'fc1'/'fc2' = hidden FC layers, 'O' = output logits.
    batch_norm : bool
        If True, append a BatchNorm layer after every conv layer.
    end_with : str
        Name (from ``layer_names``) of the last layer to include; building
        stops as soon as that layer has been appended.

    Returns
    -------
    Sequential
        The stacked layers, truncated after ``end_with``.
    """
    layers = []
    done = False
    for group_idx, group in enumerate(config):
        if isinstance(group, list):
            # Convolutional group: one 3x3 stride-1 ReLU conv per entry.
            for conv_idx, n_filter in enumerate(group):
                layer_name = layer_names[group_idx][conv_idx]
                if conv_idx > 0:
                    in_channels = group[conv_idx - 1]
                elif group_idx == 0:
                    in_channels = 3  # RGB input
                else:
                    # Previous conv group sits two slots back (a pool is between).
                    in_channels = config[group_idx - 2][-1]
                layers.append(
                    Conv2d(
                        out_channels=n_filter, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME',
                        in_channels=in_channels, name=layer_name, data_format='channels_first'
                    )
                )
                if batch_norm:
                    layers.append(BatchNorm(num_features=n_filter, data_format='channels_first'))
                if layer_name == end_with:
                    done = True
                    break
        else:
            layer_name = layer_names[group_idx]
            if group == 'M':
                layers.append(
                    MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME', name=layer_name, data_format='channels_first')
                )
            elif group == 'O':
                layers.append(Linear(out_features=1000, in_features=4096, name=layer_name))
            elif group == 'F':
                layers.append(Flatten(name='flatten'))
            elif group == 'fc1':
                layers.append(Linear(out_features=4096, act=tlx.ReLU, in_features=512 * 7 * 7, name=layer_name))
            elif group == 'fc2':
                layers.append(Linear(out_features=4096, act=tlx.ReLU, in_features=4096, name=layer_name))
            if layer_name == end_with:
                done = True
        if done:
            break
    return Sequential(layers)
def restore_model(model, layer_type):
    """Download (if needed) pretrained ImageNet weights and assign them to *model*.

    Parameters
    ----------
    model : VGG
        Network whose ``all_weights`` receive the loaded values, in order.
    layer_type : str
        'vgg16' (npz archive of arrays) or 'vgg19' (pickled dict of
        (filter, bias) pairs); selects file format and download URL.
    """
    logging.info("Restore pre-trained weights")
    # Fetch the weight file into ./model unless it is already present.
    maybe_download_and_extract(model_saved_name[layer_type], 'model', model_urls[layer_type])
    weight_path = os.path.join('model', model_saved_name[layer_type])
    weights = []
    if layer_type == 'vgg16':
        # npz archive: one array per entry; keys sort into layer order.
        archive = np.load(weight_path, allow_pickle=True)
        for key, value in sorted(archive.items()):
            logging.info(" Loading weights %s in %s" % (str(value.shape), key))
            weights.append(value)
            # Stop once the (possibly truncated) model is fully covered.
            if len(model.all_weights) == len(weights):
                break
    elif layer_type == 'vgg19':
        # Pickled dict: each entry holds a (filter, bias) pair.
        params = np.load(weight_path, allow_pickle=True, encoding='latin1').item()
        for key, pair in sorted(params.items()):
            logging.info(" Loading %s in %s" % (str(pair[0].shape), key))
            logging.info(" Loading %s in %s" % (str(pair[1].shape), key))
            weights.extend(pair)
            if len(model.all_weights) == len(weights):
                break
    # Conv kernels are stored in TensorFlow HWIO layout; other backends
    # expect OIHW, so transpose every 4-D array.
    if tlx.BACKEND != 'tensorflow':
        for i in range(len(weights)):
            if len(weights[i].shape) == 4:
                weights[i] = np.transpose(weights[i], axes=[3, 2, 0, 1])
    assign_weights(weights, model)
    del weights
def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None):
    """Pre-trained VGG16 model.

    Parameters
    ------------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model. Default ``outputs`` i.e. the whole model.
    mode : str.
        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
        ('static' is not implemented yet.)
    name : None or str
        A unique layer name.

    Examples
    ---------
    Classify ImageNet classes with VGG16, see `vgg.py <https://github.com/tensorlayer/TensorLayerX/blob/main/examples/model_zoo/vgg.py>`__

    >>> # get the whole model, without pre-trained VGG parameters
    >>> vgg = vgg16()
    >>> # get the whole model, restore pre-trained VGG parameters
    >>> vgg = vgg16(pretrained=True)
    >>> # use for inferencing
    >>> output = vgg(img)
    >>> probs = tlx.ops.softmax(output)[0].numpy()
    """
    # Guard clauses: only 'dynamic' building is currently supported.
    if mode == 'static':
        raise NotImplementedError
    if mode != 'dynamic':
        raise Exception("No such mode %s" % mode)
    model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name)
    if pretrained:
        restore_model(model, layer_type='vgg16')
    return model
def vgg19(pretrained=False, end_with='outputs', mode='dynamic', name=None):
    """Pre-trained VGG19 model.

    Parameters
    ------------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model. Default ``outputs`` i.e. the whole model.
    mode : str.
        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
        ('static' is not implemented yet.)
    name : None or str
        A unique layer name.

    Examples
    ---------
    Classify ImageNet classes with VGG19, see `vgg.py <https://github.com/tensorlayer/TensorLayerX/blob/main/examples/model_zoo/vgg.py>`__

    >>> # get the whole model, without pre-trained VGG parameters
    >>> vgg = vgg19()
    >>> # get the whole model, restore pre-trained VGG parameters
    >>> vgg = vgg19(pretrained=True)
    >>> # use for inferencing
    >>> output = vgg(img)
    >>> probs = tlx.ops.softmax(output)[0].numpy()
    """
    # Guard clauses: only 'dynamic' building is currently supported.
    if mode == 'static':
        raise NotImplementedError
    if mode != 'dynamic':
        raise Exception("No such mode %s" % mode)
    model = VGG(layer_type='vgg19', batch_norm=False, end_with=end_with, name=name)
    if pretrained:
        restore_model(model, layer_type='vgg19')
    return model
# Backward-compatible capitalized aliases for the factory functions.
VGG16 = vgg16
VGG19 = vgg19