import pickle

from keras.models import Sequential
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.layers import Lambda, Convolution2D, Activation, Dropout, Flatten, Dense

import data
# This model is an NVIDIA variant. I used SullyChen's ideas, with jacobgil's
# Keras implementation, plus small changes (based on comma.ai) to add the
# Lambda layer and move the normalization step into the model, which makes
# debugging the images much easier.
# http://github.com/SullyChen/Autopilot-TensorFlow/
# https://github.com/jacobgil/keras-steering-angle-visualizations.git
# -- needed to turn dropout back on!
def nvidia_net():
    model = Sequential()
    # dropout probability; 0.33 was also tried
    p = 0.5
    # This Lambda layer normalizes each pixel from [0, 255] to [-1, 1]
    # (e.g. 0 -> -1.0, 255 -> 1.0).
    # SullyChen used 66x200 color images. I tried a different model with
    # grayscale, and that worked well too.
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(66, 200, 3), output_shape=(66, 200, 3)))
    model.add(Convolution2D(24, 5, 5, init='normal', subsample=(2, 2), border_mode='valid', name='conv1_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(36, 5, 5, init='normal', subsample=(2, 2), border_mode='valid', name='conv2_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(48, 5, 5, init='normal', subsample=(2, 2), border_mode='valid', name='conv3_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init='normal', subsample=(1, 1), border_mode='valid', name='conv4_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init='normal', subsample=(1, 1), border_mode='valid', name='conv4_2'))
    model.add(Activation('relu'))
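    # Shape check: with valid padding and strides 2,2,2,1,1, the conv stack
    # reduces the 66x200x3 input to 1x18x64, so Flatten below yields 1,152
    # features feeding the 1,164-unit dense layer, as in the NVIDIA paper.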
    model.add(Flatten())
    model.add(Dense(1164, init='normal', name='dense_0'))
    model.add(Activation('relu'))
    model.add(Dropout(p))
    model.add(Dense(100, init='normal', name='dense_1'))
    model.add(Activation('relu'))
    model.add(Dropout(p))
    model.add(Dense(50, init='normal', name='dense_2'))
    model.add(Activation('relu'))
    model.add(Dropout(p))
    model.add(Dense(10, init='normal', name='dense_3'))
    model.add(Activation('tanh'))
    model.add(Dense(1, init='normal', name='dense_4'))
    return model

def get_model():
    model = nvidia_net()
    model.compile(loss='mse', optimizer='Adam')
    return model


def load_model(path):
    model = nvidia_net()
    model.load_weights(path)
    model.compile(loss='mse', optimizer='Adam')
    return model
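
# Illustrative usage (not exercised in this file): rebuild the network, load a
# checkpoint written during training, and predict a steering angle for a single
# preprocessed 66x200x3 frame. The filename is hypothetical; the frame only
# needs cropping/resizing, since normalization happens in the Lambda layer.
#
#   model = load_model('model-20.h5')
#   steering = float(model.predict(frame[None, :, :, :], batch_size=1))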

# number of epochs; 75 and 12 were also tried
epochs = 20


def train():
    #
    # SaveModel is a Callback subclass that saves the model after each epoch.
    # This lets us test each epoch on the simulator, which seems like a better
    # validation than the validation data set alone.
    class SaveModel(Callback):
        def on_epoch_end(self, epoch, logs={}):
            # epochs are zero-based; number the saved files from 1
            epoch += 1
            #with open('model-' + str(epoch) + '.json', 'w') as file:
            #    file.write(model.to_json())
            #model.save_weights('model-' + str(epoch) + '.h5')
            model.save('model-' + str(epoch) + '.h5')
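    # Note: Keras also sets self.model on every Callback, so
    # self.model.save(...) would work here without relying on the closure
    # over train()'s local 'model'.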
    #
    # build and compile the model
    #
    model = get_model()
    # Keras has a nice tool to create an image of our network
    # (requires pydot and graphviz)
    from keras.utils.visualize_util import plot
    plot(model, to_file='car_model.png', show_shapes=True)
    print("Loaded model")
    # load the data
    xs, ys = data.loadTraining()
    # split the dataset into training and validation, 80% / 20%
    train_xs = xs[:int(len(xs) * 0.8)]
    train_ys = ys[:int(len(xs) * 0.8)]
    val_xs = xs[-int(len(xs) * 0.2):]
    val_ys = ys[-int(len(xs) * 0.2):]
    # Materialize the validation set once rather than regenerating images on
    # every pass. processImageValidation does no augmentation, it just resizes.
    X, y = data.getValidationDataset(val_xs, val_ys, data.processImageValidation)
    print(model.summary())
    print("Loaded validation dataset")
    print("Total of", len(train_ys), "training samples")
    print("Training..")
    checkpoint_path = "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(checkpoint_path, verbose=1, save_best_only=False, save_weights_only=False, mode='auto')
    # I tried using the EarlyStopping callback, but now I run for a fixed number
    # of epochs and test each saved model to see which is best. Neither
    # checkpoint nor earlystopping is passed to fit_generator below; only
    # SaveModel is active.
    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=1, mode='auto')
    res = model.fit_generator(data.generator(train_xs, train_ys, 256), validation_data=(X, y),
                              samples_per_epoch=100 * 256, nb_epoch=epochs, verbose=1,
                              callbacks=[SaveModel()])
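    # samples_per_epoch = 100 * 256 = 25,600 generated samples per epoch,
    # independent of the dataset size, since data.generator streams (and,
    # unlike the validation path, augments) batches of 256.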
    # pickle and dump the history so we can graph it in a notebook
    history = res.history
    with open('history.p', 'wb') as f:
        pickle.dump(history, f)
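
# Illustrative sketch of graphing the pickled history in a notebook (assumes
# matplotlib is installed; 'loss' and 'val_loss' are the keys Keras records
# for this fit):
#
#   import pickle
#   import matplotlib.pyplot as plt
#   history = pickle.load(open('history.p', 'rb'))
#   plt.plot(history['loss'], label='train')
#   plt.plot(history['val_loss'], label='validation')
#   plt.legend(); plt.show()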

if __name__ == "__main__":
    train()