#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 14:03:22 2018
@author: achyutajha
"""
import os
from os import walk

import cv2
import numpy as np

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras import regularizers
from keras.callbacks import EarlyStopping
basedir = '/Users/achyutajha/Documents/PSU Study Mat/Fall-II/Deep Learning/Project/Data/KaggleData/fer2013/'
training_path = basedir + 'Training'
testing_path = basedir + 'PublicTest'
num_classes = 7  # FER2013 labels: 0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral
################## Data Collection and preprocessing stages ##################
def img_to_matrix(imagePath):
    """Load an image, resize it to 48x48, and return it as a grayscale matrix."""
    image = cv2.imread(imagePath)
    if image is None:  # unreadable or non-image file (e.g. .DS_Store)
        return None
    image = cv2.resize(image, (48, 48))
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray
def prepare_data(path):
    """Collect grayscale face matrices X and integer labels Y from class subdirectories."""
    X = []
    Y = []
    # Each immediate subdirectory of `path` is named after its integer emotion label.
    labels = next(walk(path))[1]
    for label in labels:
        for root, dirs, files in os.walk(os.path.abspath(os.path.join(path, label))):
            for file in files:
                imagePath = os.path.join(root, file)
                image = img_to_matrix(imagePath)
                if image is None:
                    continue
                X.append(image)
                Y.append([int(label)])
    return X, Y
def preprocess(X, Y):
    """Scale pixel values to [0, 1] and one-hot encode the labels."""
    flat_X = np.array(X).astype('float32')
    flat_X /= 255
    flat_Y = keras.utils.to_categorical(np.array(Y), num_classes)
    return flat_X, flat_Y
X_train, Y_train = prepare_data(training_path)
X_test, Y_test = prepare_data(testing_path)
flat_X_train, flat_Y_train = preprocess(X_train, Y_train)
flat_X_test, flat_Y_test = preprocess(X_test, Y_test)
# Add a trailing channel axis: Keras Conv2D expects (samples, height, width, channels).
flat_X_train = flat_X_train.reshape(flat_X_train.shape[0], 48, 48, 1)
flat_X_test = flat_X_test.reshape(flat_X_test.shape[0], 48, 48, 1)
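# Sanity check (an addition, not in the original script): confirm the tensors
# have the expected NHWC shapes before building the network.
print('train:', flat_X_train.shape, flat_Y_train.shape)
print('test: ', flat_X_test.shape, flat_Y_test.shape)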
################### Build the neural network ##################
model_8 = Sequential()
# 1st block: 64 3x3 filters over the 48x48x1 input
# (l2(0) keeps the regularizer hook in place but applies no weight decay)
model_8.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same',
                   input_shape=flat_X_train.shape[1:],
                   kernel_initializer='lecun_uniform',
                   kernel_regularizer=regularizers.l2(0)))
model_8.add(BatchNormalization())
model_8.add(Activation('tanh'))
model_8.add(MaxPooling2D(pool_size=(2, 2)))
model_8.add(Dropout(0.25))
# 2nd block
model_8.add(Conv2D(128, (5, 5), strides=(1, 1), kernel_regularizer=regularizers.l2(0)))
model_8.add(BatchNormalization())
model_8.add(Activation('tanh'))
model_8.add(MaxPooling2D(pool_size=(2, 2)))
model_8.add(Dropout(0.25))
# 3rd block
model_8.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', kernel_regularizer=regularizers.l2(0)))
model_8.add(BatchNormalization())
model_8.add(Activation('tanh'))
model_8.add(MaxPooling2D(pool_size=(2, 2)))
model_8.add(Dropout(0.5))
# 4th block
model_8.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', kernel_regularizer=regularizers.l2(0)))
model_8.add(BatchNormalization())
model_8.add(Activation('tanh'))
model_8.add(MaxPooling2D(pool_size=(2, 2)))
model_8.add(Dropout(0.1))
# 5th block: fully connected classifier head
model_8.add(Flatten())
model_8.add(Dense(256, kernel_initializer='lecun_uniform'))
model_8.add(BatchNormalization())
model_8.add(Activation('relu'))
model_8.add(Dropout(0.5))
model_8.add(Dense(512, kernel_initializer='lecun_uniform'))
model_8.add(BatchNormalization())
model_8.add(Activation('relu'))
model_8.add(Dropout(0.5))
model_8.add(Dense(num_classes))
model_8.add(Activation('softmax'))
model_8.summary()
learning_rate = 0.001
# sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
model_8.compile(loss=keras.losses.categorical_crossentropy,
                optimizer=Adam(lr=learning_rate),
                metrics=['accuracy'])
# Stop early once validation accuracy stops improving ('val_acc' is the
# metric name in this Keras version).
earlystop = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=7,
                          verbose=1, mode='auto')
callbacks = [earlystop]
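# Optional addition (not in the original script): checkpoint the best weights
# seen so far, so the strongest epoch survives even if a later one overfits.
# The filename 'face_cnn_best.h5' is an arbitrary choice for this sketch.
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint('face_cnn_best.h5', monitor='val_acc',
                             save_best_only=True, verbose=1)
callbacks.append(checkpoint)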
batch_size = 128
epochs = 35
# Note: the PublicTest split doubles as the validation set here.
history_8 = model_8.fit(
    flat_X_train, flat_Y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    callbacks=callbacks,
    validation_data=(flat_X_test, flat_Y_test))
model_8.save('./face_cnn_model.h5')
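# A minimal follow-up sketch (an addition, not part of the original script):
# report final loss/accuracy on the held-out PublicTest split.
score = model_8.evaluate(flat_X_test, flat_Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])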