NN.py
import numpy as np


class Network:
    def __init__(self):
        self.layers = []
        self.error_history = []

    def add_layer(self, layer):
        # Connect the new layer to the current last layer (if any), so its
        # input size matches the previous layer's output size.
        if len(self.layers) > 0:
            layer.connect(self.layers[-1])
        else:
            layer.connect()
        self.layers.append(layer)

    def compute(self, x):
        # Forward pass: feed the input through each layer in order.
        result = np.array(x)
        for layer in self.layers:
            result = layer.compute(result)
        return result

    def predict(self, x):
        return np.array([self.compute(xx) for xx in x])

    def train(self, x, y, eta=0.01, threshold=1e-3, max_iters=None):
        # Online (per-sample) training: forward pass, error, backprop, update.
        x = np.array(x)
        y = np.array(y)
        train_set_size = len(x)
        index = 0
        count = 0
        # Start with large per-sample errors so at least one full pass runs.
        error = np.array([100.0] * train_set_size)
        batch = 1
        while True:
            sample = x[index]
            label = y[index]
            output = self.compute(sample)
            d = label - output
            # Record the Euclidean error for the current sample before
            # advancing the index (the original stored it one slot late).
            error[index] = float(np.sqrt(np.dot(d, d)))
            index = (index + 1) % train_set_size
            count += 1
            mean_error = np.mean(error)
            if count % train_set_size == 0:
                self.error_history.append(mean_error)
                print("Training. Batch {:6d}. mean error={:f}".format(batch, mean_error))
                batch += 1
            if np.all(error < threshold) or (max_iters is not None and count > max_iters):
                break
            self.back_propagation(d)
            self.update(eta)

    def back_propagation(self, d):
        # Propagate deltas from the output layer back toward the input layer.
        for layer in self.layers[::-1]:
            layer.back_propagation(d)

    def update(self, eta):
        for layer in self.layers:
            layer.update(eta)


class Layer:
    def __init__(self, number_of_neurons=10, input_size=5, activation="sigmoid"):
        self.number_of_neurons = number_of_neurons
        self.activation = activation
        self.neurons = []
        self.input_size = input_size
        self.next_layer = None

    def set_next_layer(self, layer):
        self.next_layer = layer

    def connect(self, last_layer=None):
        # When stacked on a previous layer, take its output size as our
        # input size and register ourselves as its successor.
        if last_layer is not None:
            self.input_size = last_layer.get_output_size()
            last_layer.set_next_layer(self)
        for i in range(self.number_of_neurons):
            self.neurons.append(Neuron(self, i, self.activation))

    def get_output_size(self):
        return len(self.neurons)

    def compute(self, x):
        return np.array([neuron.compute(x) for neuron in self.neurons])

    def back_propagation(self, d):
        for neuron in self.neurons:
            neuron.back_propagation(d)

    def update(self, eta):
        for neuron in self.neurons:
            neuron.update(eta)


class Neuron:
    def __init__(self, layer, no, activation="sigmoid"):
        self.no = no
        self.layer = layer
        # One weight per input; no bias term is used in this implementation.
        self.weights = np.random.rand(self.layer.input_size)
        self.activation = activation
        self.delta = 0.0
        self.activation_level = 0.0
        self.input = None

    @staticmethod
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    @staticmethod
    def sigmoid_grad(x):
        # Derivative of the sigmoid: sigma'(x) = sigma(x) * (1 - sigma(x)).
        s = Neuron.sigmoid(x)
        return s * (1.0 - s)

    def compute(self, x):
        # Weighted sum of the inputs, optionally squashed by the sigmoid.
        self.input = x
        self.activation_level = np.dot(self.input, self.weights)
        if self.activation == "sigmoid":
            return Neuron.sigmoid(self.activation_level)
        return self.activation_level

    def back_propagation(self, d):
        # Output layer: take the error component for this neuron directly.
        # Hidden layer: accumulate the deltas of the next layer, weighted by
        # the connections leaving this neuron.
        if self.layer.next_layer is not None:
            tmp = 0.0
            for neuron in self.layer.next_layer.neurons:
                tmp += neuron.delta * neuron.weights[self.no]
        else:
            tmp = d[self.no]
        if self.activation == "sigmoid":
            self.delta = tmp * Neuron.sigmoid_grad(self.activation_level)
        else:
            self.delta = tmp

    def update(self, eta):
        # Delta rule: move the weights along the per-sample error gradient.
        self.weights += eta * self.delta * np.asarray(self.input)
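

# A minimal usage sketch, not part of the original file: the AND dataset,
# layer sizes, and hyperparameters below are illustrative assumptions chosen
# only to show how Network, Layer, and train() fit together.
if __name__ == "__main__":
    # Inputs and labels for the logical AND function.
    samples = [[0, 0], [0, 1], [1, 0], [1, 1]]
    labels = [[0], [0], [0], [1]]

    net = Network()
    net.add_layer(Layer(number_of_neurons=4, input_size=2, activation="sigmoid"))
    net.add_layer(Layer(number_of_neurons=1, activation="sigmoid"))

    # Cap the iterations so the sketch always terminates, even if the
    # threshold is never reached (there are no bias terms in this model).
    net.train(samples, labels, eta=0.5, threshold=1e-2, max_iters=20000)
    print(net.predict(samples))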