# model.py -- behavioral cloning: train a CNN to predict steering angles
# from the center-camera images in the recorded driving data.
import csv
import cv2
import numpy as np
from scipy import ndimage
import sklearn
from sklearn.model_selection import train_test_split

# read the driving log from the csv file (skip the header row)
lines = []
with open('/opt/carnd_p3/data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        lines.append(line)

# split the logged samples into a training set and a validation set
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
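
# Optional (illustrative only): report the size of each split -- roughly 80%
# of the log lines go to training and 20% to validation.
# print('train samples:', len(train_samples), 'validation samples:', len(validation_samples))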

def generator(samples, batch_size=32):
    """Yield shuffled (images, steering angles) batches indefinitely."""
    num_samples = len(samples)
    while True:  # loop forever so the generator never terminates
        np.random.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # load the center-camera image (RGB) and its steering angle
                current_path = '/opt/carnd_p3/data/' + batch_sample[0]
                center_image = ndimage.imread(current_path)
                center_angle = float(batch_sample[3])
                images.append(center_image)
                angles.append(center_angle)
                # augment: add a horizontally flipped copy with the angle negated
                images.append(cv2.flip(center_image, 1))
                angles.append(center_angle * -1.0)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
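
# Optional sanity check (illustrative, not required for training): pull a
# single batch from the generator and confirm the array shapes. Because of the
# flip augmentation, a batch holds up to 2 * batch_size images of shape
# (160, 320, 3).
# X_check, y_check = next(generator(train_samples, batch_size=4))
# print(X_check.shape, y_check.shape)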

# set the batch size used by both generators
batch_size = 32

# create the training and validation generators; batches are produced lazily
# while the model is being fit
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

# create the model with Keras (an NVIDIA-style architecture: normalization,
# cropping, five convolutional layers, then four fully connected layers)
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D

model = Sequential()
# normalize pixel values to the range [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# crop away the sky (top 60 rows) and the car hood (bottom 20 rows) so the
# network only sees the road section of each image
model.add(Cropping2D(cropping=((60, 20), (0, 0))))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
# single output: the predicted steering angle
model.add(Dense(1))
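
# Optional: print a layer-by-layer summary (output shapes and parameter counts)
# before training starts.
model.summary()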

# compile and train the model: Adam optimizer, mean squared error loss on the
# predicted steering angle
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=int(np.ceil(len(train_samples) / batch_size)),
    validation_data=validation_generator,
    validation_steps=int(np.ceil(len(validation_samples) / batch_size)),
    epochs=4, verbose=1)

# save the trained model
model.save('model.h5')
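
# For reference (illustrative): when the saved network is used later, it can be
# reloaded with Keras' load_model, e.g.
#   from keras.models import load_model
#   model = load_model('model.h5')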
import matplotlib.pyplot as plt
# plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
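# Note: in a headless environment plt.show() may not open a window; saving the
# figure instead of showing it is an alternative (the filename here is just an
# example):
# plt.savefig('loss_history.png')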
exit()