# learning.py (forked from Shikhargupta/Spiking-Neural-Network)
####################################################### README ####################################################################
# This is the main file which calls all the functions and trains the network by updating weights
#####################################################################################################################################
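# Pipeline implemented below: each training image is convolved with a
# receptive field (rf), encoded into a spike train (encode), and presented to a
# layer of leaky integrate-and-fire neurons. A per-image firing threshold is
# computed (threshold), lateral inhibition enforces a single winner per image,
# and spiking neurons' synapses are updated with a spike-timing-dependent
# (STDP-style) rule via rl() and update(). The learned weights are finally
# reconstructed as images (reconst_weights).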
import numpy as np
from neuron import neuron
import random
from matplotlib import pyplot as plt
from recep_field import rf
import cv2
from spike_train import encode
from rl import rl
from rl import update
from reconstruct import reconst_weights
from parameters import param as par
from var_th import threshold
import os
#potentials of output neurons
pot_arrays = []
for i in range(par.n):
    pot_arrays.append([])
#time series
time = np.arange(1, par.T+1, 1)
layer2 = []
# creating the hidden layer of neurons
for i in range(par.n):
    a = neuron()
    layer2.append(a)
#synapse matrix initialization
synapse = np.zeros((par.n,par.m))
for i in range(par.n):
    for j in range(par.m):
        synapse[i][j] = random.uniform(0,0.4*par.scale)
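# Note: range(322, 323) restricts training to a single image (index 322) from
# the mnist1/ directory; widen this range to train over more samples.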
for k in range(par.epoch):
    for i in range(322,323):
        print i," ",k
        img = cv2.imread("mnist1/" + str(i) + ".png", 0)

        #Convolving image with receptive field
        pot = rf(img)

        #Generating spike train
        train = np.array(encode(pot))

        #calculating threshold value for the image
        var_threshold = threshold(train)
        # print var_threshold
        # synapse_act = np.zeros((par.n,par.m))
        # var_threshold = 9
        # print var_threshold
        # var_D = (var_threshold*3)*0.07
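        # var_threshold is an image-specific firing threshold derived from the
        # spike train (see var_th.py). var_D below is the fixed amount by which
        # a neuron's potential leaks back toward rest whenever it sits above Prest.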
        var_D = 0.15*par.scale

        for x in layer2:
            x.initial(var_threshold)

        #flag for lateral inhibition
        f_spike = 0

        img_win = 100

        active_pot = []
        for index1 in range(par.n):
            active_pot.append(0)
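        # At every time step, each neuron that is past its refractory period
        # integrates the weighted input spikes (dot product of its synapse row
        # with the spike-train column at time t) and then leaks by var_D while
        # its potential is above the resting potential Prest.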
        #Leaky integrate and fire neuron dynamics
        for t in time:
            for j, x in enumerate(layer2):
                active = []
                if(x.t_rest<t):
                    x.P = x.P + np.dot(synapse[j], train[:,t])
                    if(x.P>par.Prest):
                        x.P -= var_D
                active_pot[j] = x.P
                pot_arrays[j].append(x.P)
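            # Winner-take-all: the first time any potential crosses the image
            # threshold, that neuron becomes the winner for this image and all
            # other neurons are clamped to the minimum potential Pmin.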
            # Lateral Inhibition
            if(f_spike==0):
                high_pot = max(active_pot)
                if(high_pot>var_threshold):
                    f_spike = 1
                    winner = np.argmax(active_pot)
                    img_win = winner
                    print "winner is " + str(winner)
                    for s in range(par.n):
                        if(s!=winner):
                            layer2[s].P = par.Pmin
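            # When a neuron spikes, it enters its refractory period and its
            # potential resets to Prest. Each of its synapses is then adjusted
            # according to the timing of the corresponding input spikes: offsets
            # from -2 back to par.t_back (inputs before the output spike) and
            # from +2 forward to par.t_fore (inputs after it) are passed to
            # rl(), whose return value drives the weight change applied by
            # update(); see rl.py for the learning-window shape and signs.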
            #Check for spikes and update weights
            for j,x in enumerate(layer2):
                s = x.check()
                if(s==1):
                    x.t_rest = t + x.t_ref
                    x.P = par.Prest
                    for h in range(par.m):
                        for t1 in range(-2,par.t_back-1, -1):
                            if 0<=t+t1<par.T+1:
                                if train[h][t+t1] == 1:
                                    # print "weight change by" + str(update(synapse[j][h], rl(t1)))
                                    synapse[j][h] = update(synapse[j][h], rl(t1))
                        for t1 in range(2,par.t_fore+1, 1):
                            if 0<=t+t1<par.T+1:
                                if train[h][t+t1] == 1:
                                    # print "weight change by" + str(update(synapse[j][h], rl(t1)))
                                    synapse[j][h] = update(synapse[j][h], rl(t1))
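        # After the image has been presented, the winning neuron's synapses
        # from inputs that never spiked for this image are depressed, floored
        # at the minimum weight w_min.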
        if(img_win!=100):
            for p in range(par.m):
                if sum(train[p])==0:
                    synapse[img_win][p] -= 0.06*par.scale
                    if(synapse[img_win][p]<par.w_min):
                        synapse[img_win][p] = par.w_min
ttt = np.arange(0,len(pot_arrays[0]),1)
Pth = []
for i in range(len(ttt)):
    Pth.append(layer2[0].Pth)
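# Plot each neuron's membrane-potential trace over the whole run, with the
# (constant) firing threshold Pth drawn in red.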
#plotting
for i in range(par.n):
    axes = plt.gca()
    axes.set_ylim([-20,50])
    plt.plot(ttt,Pth, 'r')
    plt.plot(ttt,pot_arrays[i])
    plt.show()
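# reconst_weights is expected to map each neuron's 1-D weight vector back to
# image dimensions so the learned pattern can be inspected visually
# (see reconstruct.py).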
#Reconstructing weights to analyse training
for i in range(par.n):
    reconst_weights(synapse[i],i+1)