Commit 722191a

Paul Reesman committed: Made progress on neural net for hard mode
1 parent 6937142 commit 722191a

20 files changed: +437 −41

Cargo.toml (+2 −1)

@@ -1,11 +1,12 @@
 [package]
 name = "evil_penguin"
-version = "0.8.1"
+version = "0.8.2"
 authors = ["Paul Reesman <[email protected]>"]
 edition = "2018"

 [dependencies]
 const-concat = { git = "https://github.com/reesmanp/const-concat" }
+rand = "0.7.3"

 [target.'cfg(target_os = "windows")'.dependencies.amethyst]
 version = "^0.15.0"

src/components/entities/coin.component.rs (+2)

@@ -8,6 +8,8 @@ use amethyst::{
 #[derive(Component, Default)]
 #[storage(VecStorage)]
 pub struct CoinComponent {
+    pub id: usize,
+    pub total_amount: usize,
     pub frame: usize,
     pub frames: usize,
     pub time_per_frame: f32,

src/main.rs (+1)

@@ -10,6 +10,7 @@ use amethyst::{
 };

 mod components;
+mod ml;
 mod states;
 mod systems;
 mod util;

src/ml/mod.rs (+1)

@@ -0,0 +1 @@
+pub mod neural_network;

src/ml/neural_network/mod.rs (+6)

@@ -0,0 +1,6 @@
+pub mod util;
+mod neural_network;
+mod neuron;
+
+pub use neural_network::NeuralNetwork;
+pub use neuron::Neuron;

src/ml/neural_network/neural_network.rs (+190)

@@ -0,0 +1,190 @@
+use crate::{
+    ml::neural_network::Neuron,
+    util::types::{
+        InstanceInput,
+        NeuralNetworkLayer,
+        ProblemSet
+    }
+};
+
+use rand::Rng;
+
+pub struct NeuralNetwork {
+    pub layers: Vec<NeuralNetworkLayer>,
+    test_instances: usize,
+    tests_correct: usize,
+    pub update_bias: bool,
+    pub mse: f32
+}
+
+impl NeuralNetwork {
+    pub fn new(layout: Vec<usize>, learning_rate: f32, feature_amount: usize) -> Self {
+        let mut layers = Vec::with_capacity(layout.len());
+        let mut rng = rand::thread_rng();
+        // The initial input is the features themselves
+        let mut next_input_size = feature_amount;
+
+        // Build each layer to the size given in `layout`
+        for node_amount in layout {
+            // Auto-generate a bias in [0.0, 1.0)
+            let bias = rng.gen();
+            let mut layer = Vec::with_capacity(node_amount);
+
+            // Instantiate the correct number of neurons for this layer
+            for _ in 0..node_amount {
+                // The number of weights must equal the size of this layer's input
+                let mut weights = Vec::with_capacity(next_input_size);
+
+                // Auto-generate weights in [0.0, 1.0)
+                for _ in 0..next_input_size {
+                    weights.push(rng.gen());
+                }
+
+                // Instantiate neuron
+                let node = Neuron::new(weights, bias, learning_rate);
+                layer.push(node);
+            }
+
+            // Add the layer, with its generated bias, to the network
+            layers.push(layer);
+            // The size of the next input is the size of this layer's output
+            next_input_size = node_amount;
+        }
+
+        Self {
+            layers,
+            test_instances: 0,
+            tests_correct: 0,
+            update_bias: true,
+            mse: 0.0
+        }
+    }
+
+    fn feed_forward(&mut self, instance: InstanceInput) {
+        let mut this_input = instance;
+
+        for nodes in &mut self.layers {
+            let mut next_input = Vec::with_capacity(nodes.len());
+
+            for node in nodes {
+                node.calc_activity(this_input.clone());
+                node.calc_activation();
+
+                // The output of this node is an input to the next layer
+                next_input.push(node.activation);
+            }
+
+            // The outputs of this layer are the inputs of the next layer
+            this_input = next_input;
+        }
+    }
+
+    fn back_propagation(&mut self, expected_output: f32) {
+        let mut is_output_layer = true;
+        let mut error_sum_vec: Vec<f32>;
+        let mut next_error_sum_vec: Vec<f32> = vec![];
+
+        // For every layer, working backwards
+        for layer in &mut self.layers.iter_mut().rev() {
+            error_sum_vec = next_error_sum_vec;
+            next_error_sum_vec = Vec::with_capacity(layer.len());
+
+            // Make sure the vector has enough elements
+            for _ in 0..layer[0].get_weights().len() {
+                next_error_sum_vec.push(0.0);
+            }
+
+            for node in layer {
+                if is_output_layer {
+                    node.calc_delta_weights(expected_output - node.activation);
+                } else {
+                    node.calc_delta_weights(error_sum_vec.remove(0));
+                }
+
+                // Accumulate the error values for the next (earlier) layer
+                for index in 0..next_error_sum_vec.len() {
+                    next_error_sum_vec[index] += node.weights[index] * node.delta;
+                }
+
+                // Find the delta for the weight associated with the bias
+                node.calc_delta_bias()
+            }
+
+            is_output_layer = false;
+        }
+    }
+
+    /// All the weights need to be updated at the same time,
+    /// so here we update the weights based on the calculated deltas
+    fn update_weights(&mut self) {
+        for layer in &mut self.layers {
+            for node in layer {
+                node.update_weights();
+
+                if self.update_bias {
+                    node.update_bias();
+                }
+            }
+        }
+    }
+
+    // TODO: Figure out later
+    // pub fn train(&mut self, training_set: ProblemSet, epochs: usize, is_online: bool) {
+    //     for _ in 0..epochs {
+    //         for (training_instance, expected_output) in training_set.clone() {
+    //             // Feed forward
+    //             self.feed_forward(training_instance);
+    //
+    //             // Back propagation
+    //             self.back_propagation(expected_output);
+    //
+    //             // Update weights after every instance (online learning)
+    //             if is_online {
+    //                 self.update_weights();
+    //             }
+    //         }
+    //
+    //         // Update weights once per epoch (batch learning)
+    //         if !is_online {
+    //             self.update_weights();
+    //         }
+    //     }
+    // }
+
+    pub fn test(&mut self, test_instance: Vec<f32>, maybe_threshold: Option<f32>) -> Vec<f32> {
+        self.feed_forward(test_instance);
+
+        self.layers
+            .last()
+            .unwrap()
+            .iter()
+            .map(|neuron| neuron.activation)
+            .collect::<Vec<f32>>()
+    }
+
+    pub fn print(&self) {
+        for layer in &self.layers {
+            println!("********************");
+            for neuron in layer {
+                println!("Bias: {}", neuron.bias);
+                println!("{:?}", neuron.get_weights());
+            }
+            println!("********************");
+        }
+    }
+
+    pub fn accuracy(&self, is_classification: bool) -> Option<f32> {
+        if self.test_instances == 0 {
+            None
+        } else if is_classification {
+            Some((self.tests_correct as f32 / self.test_instances as f32) * 100.0)
+        } else {
+            Some(self.mse)
+        }
+    }
+
+    pub fn get_output(&self) -> Vec<f32> {
+        self.layers[self.layers.len() - 1].iter().map(|node| node.activation).collect::<Vec<f32>>()
+    }
+}
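
For context, here is a minimal sketch of how this network could be driven from inside the crate. The type aliases in src/util/types.rs are not shown in this commit, so this assumes InstanceInput = Vec<f32> and NeuralNetworkLayer = Vec<Neuron>; the layout and inputs are invented for illustration.

    use crate::ml::neural_network::NeuralNetwork;

    // Hypothetical driver, not part of this commit.
    fn demo_network() {
        // Two input features -> hidden layer of 3 neurons -> 1 output neuron.
        let mut net = NeuralNetwork::new(vec![3, 1], 0.5, 2);

        // `test` runs a feed-forward pass and returns the output layer's activations.
        let output = net.test(vec![0.0, 1.0], None);
        println!("untrained output: {:?}", output);

        // Weights and biases start random in [0.0, 1.0), and `train` is still
        // commented out in this commit, so the output here is untrained.
        net.print();
    }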

src/ml/neural_network/neuron.rs (+121)

@@ -0,0 +1,121 @@
+use crate::{
+    ml::neural_network::{
+        util::sigmoid::{
+            sigmoid,
+            sigmoid_delta
+        }
+    },
+    util::{
+        constants::{
+            DEFAULT_LEARNING_RATE,
+            DEFAULT_NUM_OF_WEIGHTS
+        },
+        types::{
+            InstanceInput,
+            ProblemSet,
+            Weights
+        }
+    }
+};
+
+use rand::prelude::*;
+
+pub struct Neuron {
+    inputs: Vec<f32>,
+
+    pub weights: Weights,
+    pub delta_weights: Weights,
+
+    pub bias: f32,
+    delta_bias: f32,
+
+    pub activity: f32,
+
+    learning_rate: f32,
+
+    pub activation: f32,
+    pub delta: f32
+}
+
+impl Neuron {
+    pub fn new(weights: Weights, bias: f32, learning_rate: f32) -> Self {
+        // Start with a zeroed delta for every weight
+        let mut delta_weights = weights.clone();
+        for index in 0..weights.len() {
+            delta_weights[index] = 0.0;
+        }
+
+        Self {
+            inputs: vec![],
+            weights,
+            delta_weights,
+            bias,
+            delta_bias: 0.0,
+            activity: 0.0,
+            activation: 0.0,
+            delta: 0.0,
+            learning_rate
+        }
+    }
+
+    pub fn calc_activity(&mut self, input_vec: InstanceInput) {
+        let mut activity_value = self.bias;
+
+        for (weight, value) in self.weights.iter().zip(input_vec.iter()) {
+            activity_value += weight * value;
+        }
+
+        self.activity = activity_value;
+        self.inputs = input_vec;
+    }
+
+    pub fn calc_activation(&mut self) {
+        self.activation = sigmoid(self.activity);
+    }
+
+    pub fn calc_delta_weights(&mut self, error_value: f32) {
+        self.delta = sigmoid_delta(
+            self.activation,
+            error_value
+        );
+
+        for index in 0..self.weights.len() {
+            self.delta_weights[index] = self.learning_rate
+                * self.delta
+                * self.inputs[index];
+        }
+    }
+
+    pub fn update_weights(&mut self) {
+        for index in 0..self.weights.len() {
+            self.weights[index] += self.delta_weights[index];
+            self.delta_weights[index] = 0.0;
+            self.delta = 0.0;
+        }
+    }
+
+    /// Assumes the delta weights have been calculated first
+    pub fn calc_delta_bias(&mut self) {
+        self.delta_bias = self.learning_rate * self.delta;
+    }
+
+    pub fn update_bias(&mut self) {
+        self.bias += self.delta_bias;
+        self.delta_bias = 0.0;
+    }
+
+    pub fn get_weights(&self) -> &Weights {
+        &self.weights
+    }
+}
+
+impl Default for Neuron {
+    fn default() -> Self {
+        let mut rng = rand::thread_rng();
+        let mut weight_vec = Vec::with_capacity(DEFAULT_NUM_OF_WEIGHTS);
+        for _ in 0..DEFAULT_NUM_OF_WEIGHTS {
+            weight_vec.push(rng.gen());
+        }
+
+        Self::new(weight_vec, rng.gen(), DEFAULT_LEARNING_RATE)
+    }
+}
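
As a sanity check on the delta rule in Neuron, a single neuron can be nudged toward a constant target. This sketch is illustrative only; it assumes Weights and InstanceInput are both Vec<f32>, which this commit does not show.

    use crate::ml::neural_network::Neuron;

    // Hypothetical test, not part of this commit.
    fn single_neuron_demo() {
        // Two weights, bias 0.5, learning rate 0.1.
        let mut neuron = Neuron::new(vec![0.5, 0.5], 0.5, 0.1);

        for _ in 0..1_000 {
            neuron.calc_activity(vec![1.0, 0.0]);
            neuron.calc_activation();

            // For an output neuron the error term is (target - activation).
            let error = 0.25 - neuron.activation;
            neuron.calc_delta_weights(error);
            neuron.calc_delta_bias();

            neuron.update_weights();
            neuron.update_bias();
        }

        // The activation should now sit close to the 0.25 target.
        println!("activation after training: {}", neuron.activation);
    }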

src/ml/neural_network/util/mod.rs (+1)

@@ -0,0 +1 @@
+pub mod sigmoid;

src/ml/neural_network/util/sigmoid.rs (+12)

@@ -0,0 +1,12 @@
+use std::f64::consts::E;
+
+pub fn sigmoid(activity_value: f32) -> f32 {
+    (1.0 / (1.0 + E.powf(-activity_value as f64))) as f32
+}
+
+/// `error_value` is:
+///   desired_output - actual_output              ==> output node
+///   Sum(outbound_weight * previous_sigmoid_delta) ==> hidden node
+pub fn sigmoid_delta(neuron_activation_value: f32, error_value: f32) -> f32 {
+    neuron_activation_value * (1.0 - neuron_activation_value) * error_value
+}
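
The reason sigmoid_delta takes the activation rather than the raw activity is the identity σ′(x) = σ(x) · (1 − σ(x)): once the forward pass has computed the activation, the derivative costs no extra exponential. A quick finite-difference check (illustrative, not part of the commit):

    // Hypothetical check that sigmoid_delta matches the numeric derivative.
    fn check_sigmoid_derivative() {
        let x = 0.7_f32;
        let h = 1e-3_f32;

        // Central finite-difference estimate of d/dx sigmoid(x)
        let numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2.0 * h);

        // Closed form via the identity, with error_value = 1.0
        let analytic = sigmoid_delta(sigmoid(x), 1.0);

        // The two should agree to about three decimal places in f32
        assert!((numeric - analytic).abs() < 1e-3);
    }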

src/states/loading.state.rs (+2 −2)

@@ -22,7 +22,7 @@ use crate::{
         }
     },
     systems::movement::Difficulty,
-    util::types::SpritesheetLoadingData
+    util::types::SpriteSheetLoadingData
 };

 use std::collections::HashMap;

@@ -102,7 +102,7 @@ impl LoadingState {
     fn load_sprite_sheet(
         &mut self,
         world: &mut World,
-        (sprite_name, sprite_sheet_path, ron_path): SpritesheetLoadingData
+        (sprite_name, sprite_sheet_path, ron_path): SpriteSheetLoadingData
     ) {
         let texture_handle = {
             let loader = world.read_resource::<Loader>();

src/states/menu/main_menu.state.rs (+2)

@@ -52,6 +52,8 @@ impl<'a, 'b> SimpleState for MainMenuState<'a, 'b> {
             } else if Some(ui_event.target) == hard {
                 return Trans::Switch(Box::new(LoadingState::new(NextLoadingState::Run(Difficulty::Hard))))
             }
+        } else if ui_event.event_type == UiEventType::HoverStart {
+            // TODO: change cursor
         }
     }
