# CIFAR-10-BiLSTM.py
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
from network.bilstm_classification import BiLSTM_Sequential_Classification
from network.bilstm_classification import BiLSTM_Deep_Classification
from network.bilstm_classification import BiLSTM_Deep_V_0_1
from network.bilstm_classification import BiLSTM_Deep_V_0_2
from network.bilstm_classification import BiLSTM_Deep_V_0_3
from network.bilstm_classification import BiLSTM_Deep_V_0_4
from network.bilstm_classification import BiLSTM_Deep_V_0_5
from network.bilstm_classification import BiLSTM_Deep_V_0_6
from network.bilstm_classification import RNN_Deep_V_0_7
from helper.parser import define_parser
from keras.applications.densenet import DenseNet121
from tqdm import tqdm
from helper import handle_model as hm
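

# Turn integer class labels into one-hot targets. When out_sequence is True,
# each scalar label is broadcast to an (output_size, output_size) map so the
# network is trained to predict the class at every spatial position.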
def label_preprocess(label, down_sampling):
    if out_sequence:
        output_size = int(input_size * down_sampling)
        y = np.empty((label.shape[0], output_size, output_size, 1))
        for i in range(label.shape[0]):
            y[i] = label[i]
            # print(y[i])
        return keras.utils.to_categorical(y, num_classes)
    else:
        return keras.utils.to_categorical(label, num_classes)


def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The current epoch number

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr
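

# Score a sequence-output model: take the argmax class at every spatial
# position of the predicted map, then majority-vote (np.bincount + argmax)
# over the map to get one label per image.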
def test(model, x_test, y_test, down_sampling):
    y_pred = model.predict(x_test)
    test_number = y_test.shape[0]
    output_size = int(input_size * down_sampling)
    y = np.empty((test_number, output_size, output_size, 1)).astype('int32')
    # y_final = np.full_like(y_test, 0)
    count = 0
    for num in tqdm(range(test_number)):
        for i in range(y_pred.shape[1]):
            for j in range(y_pred.shape[2]):
                pred_cls = y_pred[num, i, j, :].tolist()
                y[num, i, j, 0] = pred_cls.index(max(pred_cls))
        if int(y_test[num]) == int(np.argmax(np.bincount(y[num, :, :, :].flatten()))):
            count += 1
    return count / test_number
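

# End-to-end pipeline: load CIFAR-10, normalize, build the model, train with
# optional real-time augmentation, and report test accuracy.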
def main():
    # Load the CIFAR10 data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Input image dimensions.
    input_shape = x_train.shape[1:]

    # Normalize data.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    # If subtract pixel mean is enabled
    if subtract_pixel_mean:
        x_train_mean = np.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean

    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    print('y_train shape:', y_train.shape)

    # Convert class vectors to binary class matrices.
    # y_train = keras.utils.to_categorical(y_train, num_classes)
    # y_test = keras.utils.to_categorical(y_test, num_classes)
    y_train_pro = label_preprocess(y_train, down_sampling_ratio)
    y_test_pro = label_preprocess(y_test, down_sampling_ratio)
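
    # Earlier architecture variants, kept for reference; swap one in to
    # reproduce those experiments.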
    # model = BiLSTM_Sequential_Classification(input_shape=input_shape, classes=num_classes)
    # model = BiLSTM_Deep_Classification(input_shape=input_shape, classes=num_classes)
    # model = BiLSTM_Deep_V_0_1(input_shape=input_shape, classes=num_classes)
    # model = BiLSTM_Deep_V_0_2(input_shape=input_shape, classes=num_classes)
    # model = BiLSTM_Deep_V_0_3(input_shape=input_shape, classes=num_classes)
    # model = BiLSTM_Deep_V_0_6(input_shape=input_shape,
    #                           classes=num_classes,
    #                           sequence=out_sequence)
    model = RNN_Deep_V_0_7(input_shape=input_shape,
                           classes=num_classes,
                           sequence=out_sequence)

    # if version == 2:
    #     model = resnet_v2(input_shape=input_shape, depth=depth)
    # else:
    #     model = resnet_v1(input_shape=input_shape, depth=depth)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    model.summary()
    print(model_type)

    # Prepare model saving directory.
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    model_name = 'cifar10-%s-{epoch:03d}-{val_acc:.5f}-{val_loss:.5f}.h5' % model_type
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filepath = os.path.join(save_dir, model_name)

    # Prepare callbacks for model saving and for learning rate adjustment.
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)
    lr_scheduler = LearningRateScheduler(lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)
    callbacks = [checkpoint, lr_reducer, lr_scheduler]
    # Run training, with or without data augmentation.
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train, y_train_pro,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test_pro),
                  shuffle=True,
                  callbacks=callbacks)
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and real-time data augmentation:
        datagen = ImageDataGenerator(
            # set input mean to 0 over the dataset
            featurewise_center=False,
            # set each sample mean to 0
            samplewise_center=False,
            # divide inputs by std of dataset
            featurewise_std_normalization=False,
            # divide each input by its std
            samplewise_std_normalization=False,
            # apply ZCA whitening
            zca_whitening=False,
            # epsilon for ZCA whitening
            zca_epsilon=1e-06,
            # randomly rotate images in the range (deg 0 to 180)
            rotation_range=0,
            # randomly shift images horizontally
            width_shift_range=0.1,
            # randomly shift images vertically
            height_shift_range=0.1,
            # set range for random shear
            shear_range=0.,
            # set range for random zoom
            zoom_range=0.,
            # set range for random channel shifts
            channel_shift_range=0.,
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            # value used for fill_mode = "constant"
            cval=0.,
            # randomly flip images horizontally
            horizontal_flip=True,
            # randomly flip images vertically
            vertical_flip=False,
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0)

        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(x_train, y_train_pro, batch_size=batch_size),
                            validation_data=(x_test, y_test_pro),
                            epochs=epochs, verbose=1, workers=4,
                            callbacks=callbacks)

    # Score trained model.
    if out_sequence:
        acc = test(model, x_test, y_test, down_sampling_ratio)
        print('Test accuracy:', acc)
    else:
        scores = model.evaluate(x_test, y_test_pro, verbose=1)
        print('Test loss:', scores[0])
        print('Test accuracy:', scores[1])


if __name__ == '__main__':
    args = define_parser()

    batch_size = 512
    if args.bs:
        batch_size = args.bs

    epochs = 300
    if args.ep:
        epochs = args.ep

    out_sequence = False
    if args.seq:
        out_sequence = args.seq

    # Training parameters
    data_augmentation = True
    num_classes = 10
    if out_sequence:
        down_sampling_ratio = 1 / 4  # Todo: Important
    else:
        down_sampling_ratio = 1 / 32
    input_size = 32
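    # With input_size = 32, a ratio of 1/4 yields an 8x8 output map, while
    # 1/32 collapses the sequence output to a single 1x1 prediction.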
    # Subtracting pixel mean improves accuracy
    subtract_pixel_mean = True

    # Model name used for checkpoint filenames
    model_type = 'RNN_Deep_V_0_7'  # Todo: Important

    main()
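
# Example invocation (assuming helper.parser's define_parser exposes --bs, --ep
# and --seq options matching the attribute accesses above; the exact flag names
# live in helper/parser.py):
#   python CIFAR-10-BiLSTM.py --bs 256 --ep 200 --seq 1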