NeuralNet.cpp
#include <cmath>
#include <cstdio>
#include <iostream>
#include "NeuralNet.h"
using namespace std;
// Initialize the neural network with the given parameters, in turn
// initializing each layer with neurons of random weight.
NeuralNet::NeuralNet(int inputs,
                     int outputs,
                     int hiddenLayers,
                     int neuronsPerLayer,
                     double alpha,
                     double threshold) {
  numInputs = inputs;
  numOutputs = outputs;
  numHiddenLayers = hiddenLayers;
  numNeuronsPerLayer = neuronsPerLayer;
  learningRate = alpha;
  responseThreshold = threshold;
  layers = new vector<Layer*>(hiddenLayers + 2);
  // Input layer, first hidden layer, and output layer. Note that this
  // layout assumes hiddenLayers >= 1; with hiddenLayers == 0 the output
  // layer assignment would overwrite (and leak) the layer at index 1.
  (*layers)[0] = new Layer(inputs, 0);
  (*layers)[1] = new Layer(neuronsPerLayer, inputs);
  (*layers)[hiddenLayers + 1] = new Layer(outputs, neuronsPerLayer);
  // Remaining hidden layers, each fully connected to the previous one.
  for (int i = 2; i < layers->size() - 1; i++) {
    (*layers)[i] = new Layer(neuronsPerLayer, neuronsPerLayer);
  }
}
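// A minimal usage sketch (hypothetical parameter values, not taken from
// the original sources): a net with 4 inputs, one hidden layer of 6
// neurons, 3 outputs, learning rate 0.5, and response threshold 1.0
// would be constructed as
//   NeuralNet net(4, 3, 1, 6, 0.5, 1.0);
// giving the layer vector [Layer(4, 0), Layer(6, 4), Layer(3, 6)].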
NeuralNet::~NeuralNet() {
  for (int i = 0; i < layers->size(); i++) {
    delete (*layers)[i];
  }
  delete layers;
}
// Compute the outputs from a given set of inputs.
// Note: the bias parameter is currently unused in the weighted sum.
void NeuralNet::feedForward(vector<double>* inputs,
                            vector<double>* outputLayer,
                            const double bias) {
  // Load the raw inputs into the input layer.
  Layer* inputLayer = (*layers)[0];
  for (int i = 0; i < inputLayer->neuronCount(); i++) {
    inputLayer->getNeuron(i)->setValue((*inputs)[i]);
  }
  // Propagate values forward: each neuron takes the weighted sum of the
  // previous layer's values and squashes it through the sigmoid.
  for (int l = 1; l < numHiddenLayers + 2; l++) {
    Layer *curr = (*layers)[l], *upstream = (*layers)[l - 1];
    for (int j = 0; j < curr->neuronCount(); j++) {
      Neuron *n = curr->getNeuron(j);
      double sum = 0;
      for (int i = 0; i < upstream->neuronCount(); i++) {
        sum += n->getWeight(i) * upstream->getNeuron(i)->getValue();
      }
      n->setActivation(sum);
      n->setValue(sigmoid(sum));
    }
  }
  // Copy the output layer's values into the caller's buffer.
  Layer* lastLayer = (*layers)[numHiddenLayers + 1];
  for (int i = 0; i < lastLayer->neuronCount(); i++) {
    (*outputLayer)[i] = lastLayer->getNeuron(i)->getValue();
  }
}
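// A minimal call sketch (hypothetical sizes, for a net with 4 inputs and
// 3 outputs): the caller pre-sizes the output buffer to match the output
// layer.
//   vector<double> in = {0.1, 0.9, 0.3, 0.5};
//   vector<double> out(3);
//   net.feedForward(&in, &out, 1.0);
// After the call, out holds the sigmoid values of the output neurons.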
// Back-propagate the errors to update the weights.
void NeuralNet::backPropagate(vector<double>* outputs, int teacher) {
  // Output-layer deltas: (target - output) * sigmoid'(activation), where
  // the target is 1 for the teacher neuron and 0 for all others.
  Layer *outputLayer = (*layers)[numHiddenLayers + 1];
  for (int i = 0; i < outputs->size(); i++) {
    Neuron *n = outputLayer->getNeuron(i);
    double adjusted = -n->getValue();
    if (i == teacher) {
      adjusted += 1;
    }
    n->setDelta(sigmoidPrime(n->getActivation()) * adjusted);
  }
  // Propagate deltas backward from output layer to input layer, updating
  // the downstream weights as we go.
  for (int l = numHiddenLayers; l >= 0; l--) {
    Layer *curr = (*layers)[l], *downstream = (*layers)[l + 1];
    for (int i = 0; i < curr->neuronCount(); i++) {
      double sum = 0;
      Neuron *n = curr->getNeuron(i);
      for (int j = 0; j < downstream->neuronCount(); j++) {
        sum += downstream->getNeuron(j)->getWeight(i)
            * downstream->getNeuron(j)->getDelta();
      }
      n->setDelta(sigmoidPrime(n->getActivation()) * sum);
      // Delta rule: each downstream weight moves by
      // learningRate * (this neuron's output) * (downstream delta).
      // Using getValue() here rather than sigmoid(getActivation()) keeps
      // the update correct for the input layer, whose values are set
      // directly and whose activations are never computed.
      for (int j = 0; j < downstream->neuronCount(); j++) {
        downstream->getNeuron(j)->updateWeight(i,
            learningRate * n->getValue()
            * downstream->getNeuron(j)->getDelta());
      }
    }
  }
}
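// In equations, with a_i the activation, value_i the output, and d_i the
// delta of neuron i, the loop above computes for each non-output neuron:
//   d_i = s'(a_i) * sum_j (w_ij * d_j)
// and updates each weight from i into downstream neuron j by:
//   w_ij += learningRate * value_i * d_j
// which is the standard delta rule for sigmoid units.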
// Compute the sigmoid function: s(a) = 1 / (1 + e^(-a/T)),
// where T is the response threshold.
inline double NeuralNet::sigmoid(double activation) {
  return 1.0 / (1.0 + exp(-activation / responseThreshold));
}
// Compute the derivative of the sigmoid function.
inline double NeuralNet::sigmoidPrime(double activation) {
  double exponential = exp(activation / responseThreshold);
  return exponential / (responseThreshold * pow(exponential + 1, 2));
}