Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Python version of a multi-hidden-layer neural network library #153

Open
wants to merge 48 commits into
base: deeplearn
Choose a base branch
from

Conversation

pythonAIdeveloper
Copy link

`
import random
import math

def sigmoid(x, a, b):
    """Logistic activation: 1 / (1 + e^-x).

    The `a` and `b` parameters are the unused row/column indices supplied by
    Matrix.map; they are kept so the function matches the map callback shape.

    Uses the numerically stable two-branch form: the original
    `1 / (1 + math.exp(-x))` raises OverflowError for large negative x.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    ex = math.exp(x)  # x < 0 here, so exp(x) cannot overflow
    return ex / (1 + ex)

def dsigmoid(y, a, b):
    """Derivative of the sigmoid, expressed in terms of the sigmoid OUTPUT y.

    `a` and `b` are the unused row/column indices passed by Matrix.map.
    """
    one_minus_y = 1 - y
    return y * one_minus_y

class Matrix():
    """Minimal dense matrix: a rows x cols grid of numbers in nested lists."""

    def __init__(self, rows, cols):
        # restored from `def init` — the pasted code lost the double
        # underscores to markdown formatting
        self.rows = rows
        self.cols = cols
        # zero-filled rows x cols grid, one list per row
        self.data = [[0] * cols for _ in range(rows)]

    # vector operations
    @staticmethod
    def MatrixMultiply(m1, m2):
        """Return the matrix product m1 * m2 as a new Matrix.

        Raises:
            ValueError: if m1.cols != m2.rows.  (The original returned the
            string "you did something wrong", which only surfaced later as
            an AttributeError in the caller.)
        """
        if m1.cols != m2.rows:
            raise ValueError(
                "cannot multiply a %dx%d matrix by a %dx%d matrix"
                % (m1.rows, m1.cols, m2.rows, m2.cols))
        result = Matrix(m1.rows, m2.cols)
        for i in range(result.rows):
            for j in range(result.cols):
                acc = 0  # renamed from `sum`, which shadowed the builtin
                for k in range(m1.cols):
                    acc += m1.data[i][k] * m2.data[k][j]
                result.data[i][j] = acc
        return result

    # converting array into matrix
    @staticmethod
    def fromArray(arr):
        """Build a len(arr) x 1 column matrix from a flat sequence."""
        m = Matrix(len(arr), 1)
        for i, value in enumerate(arr):
            m.data[i][0] = value
        return m

    # converting matrix into array
    def toArray(self):
        """Flatten the matrix row-by-row into a plain list."""
        return [self.data[i][j]
                for i in range(self.rows)
                for j in range(self.cols)]

    # scalar / element-wise operations
    def multiply(self, n):
        """In-place multiply: element-wise when n is a Matrix, scalar otherwise."""
        element_wise = isinstance(n, Matrix)
        for i in range(self.rows):
            for j in range(self.cols):
                self.data[i][j] *= n.data[i][j] if element_wise else n

    def add(self, n):
        """In-place add: element-wise when n is a Matrix, scalar otherwise."""
        element_wise = isinstance(n, Matrix)
        for i in range(self.rows):
            for j in range(self.cols):
                self.data[i][j] += n.data[i][j] if element_wise else n

    @staticmethod
    def subtract(a, b):
        """Return a new Matrix holding a - b element-wise."""
        result = Matrix(a.rows, a.cols)
        for i in range(a.rows):
            for j in range(a.cols):
                result.data[i][j] = a.data[i][j] - b.data[i][j]
        return result

    # randomization
    def randomize(self):
        """Fill every entry with a uniform random value in [-1, 1]."""
        for i in range(self.rows):
            for j in range(self.cols):
                self.data[i][j] = random.uniform(-1, 1)

    # transposing a matrix
    @staticmethod
    def transpose(m):
        """Return a new cols x rows Matrix with rows and columns swapped."""
        result = Matrix(m.cols, m.rows)
        for i in range(m.rows):
            for j in range(m.cols):
                result.data[j][i] = m.data[i][j]
        return result

    # applying a function
    def map(self, func):
        """In-place: replace each entry with func(value, row, col)."""
        for i in range(self.rows):
            for j in range(self.cols):
                self.data[i][j] = func(self.data[i][j], i, j)

    @staticmethod
    def staticMap(m, func):
        """Return a new Matrix of func(value, 0, 0) applied to each entry.

        NOTE(review): unlike map(), the real row/col indices are not passed —
        func always receives (value, 0, 0), matching the original behavior.
        """
        result = Matrix(m.rows, m.cols)
        for i in range(m.rows):
            for j in range(m.cols):
                result.data[i][j] = func(m.data[i][j], 0, 0)
        return result

    # printing a matrix
    def printMatrix(self):
        """Print each row as a list of one-element lists, then a blank line."""
        for i in range(self.rows):
            print([[self.data[i][j]] for j in range(self.cols)])
        print()

class SingleLayerNewralNetwork():
    """Fully-connected network with one hidden layer and sigmoid activations,
    trained by per-sample gradient descent.

    NOTE(review): the "Newral" spelling is kept so existing callers keep
    working; renaming would be an interface change.
    """

    def __init__(self, inputNodes, hiddenNodes, OutputNodes):
        # restored from `def init` — markdown stripped the double underscores
        self.inputNodes = inputNodes
        self.hiddenNodes = hiddenNodes
        self.outputNodes = OutputNodes

        # weight matrices: rows = destination layer size, cols = source size
        self.weights_ih = Matrix(self.hiddenNodes, self.inputNodes)
        self.weights_ho = Matrix(self.outputNodes, self.hiddenNodes)

        # one bias column vector per non-input layer
        self.bias_h = Matrix(self.hiddenNodes, 1)
        self.bias_o = Matrix(self.outputNodes, 1)

        self.weights_ho.randomize()
        self.weights_ih.randomize()
        self.bias_h.randomize()
        self.bias_o.randomize()

        self.learningRate = 0.1

    def feedForward(self, inputArray):
        """Run one forward pass; return the output layer as a flat list."""
        inputs = Matrix.fromArray(inputArray)

        # hidden = sigmoid(W_ih * inputs + b_h)
        hidden = Matrix.MatrixMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(sigmoid)

        # output = sigmoid(W_ho * hidden + b_o)
        output = Matrix.MatrixMultiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(sigmoid)

        return output.toArray()

    def train(self, inputs_array, targets_array):
        """One gradient-descent step for a single (input, target) pair."""
        # ---- forward pass (same maths as feedForward) ----
        inputs = Matrix.fromArray(inputs_array)
        hidden = Matrix.MatrixMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(sigmoid)

        outputs = Matrix.MatrixMultiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(sigmoid)

        # ---- backward pass ----
        targets = Matrix.fromArray(targets_array)
        # error = targets - outputs
        outputErrors = Matrix.subtract(targets, outputs)

        # output gradient = lr * error * sigmoid'(outputs)
        gradients = Matrix.staticMap(outputs, dsigmoid)
        gradients.multiply(outputErrors)
        gradients.multiply(self.learningRate)

        # hidden -> output deltas
        hiddenT = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.MatrixMultiply(gradients, hiddenT)
        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)

        # hidden error = W_ho^T * output error
        # NOTE(review): the transpose is taken AFTER W_ho was updated above —
        # preserved exactly as in the original implementation.
        weight_ho_t = Matrix.transpose(self.weights_ho)
        hiddenErrors = Matrix.MatrixMultiply(weight_ho_t, outputErrors)

        # hidden gradient = lr * hidden error * sigmoid'(hidden)
        hiddenGradient = Matrix.staticMap(hidden, dsigmoid)
        hiddenGradient.multiply(hiddenErrors)
        hiddenGradient.multiply(self.learningRate)

        # input -> hidden deltas
        inputsT = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.MatrixMultiply(hiddenGradient, inputsT)
        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hiddenGradient)

class MultiLayerNewralNetwork():
    """Fully-connected network with an arbitrary list of hidden-layer sizes,
    sigmoid activations, trained by per-sample gradient descent.

    NOTE(review): the "Newral" spelling is kept so existing callers keep
    working; renaming would be an interface change.
    """

    def __init__(self, inputNodes, arrayOfHiddenNodes, OutputNodes):
        self.inputNodes = inputNodes
        self.hiddenLayers = arrayOfHiddenNodes  # list of hidden layer sizes
        self.outputNodes = OutputNodes

        # input -> first hidden, and last hidden -> output
        self.weights_ih = Matrix(self.hiddenLayers[0], self.inputNodes)
        self.weights_ho = Matrix(self.outputNodes, self.hiddenLayers[-1])
        # hidden -> hidden: weights_h[i-1] connects hidden layer i-1 to layer i
        self.weights_h = [
            Matrix(self.hiddenLayers[i], self.hiddenLayers[i - 1])
            for i in range(1, len(self.hiddenLayers))
        ]

        # one bias column vector per non-input layer
        self.bias_o = Matrix(self.outputNodes, 1)
        self.bias_h = [Matrix(n, 1) for n in self.hiddenLayers]

        for w in self.weights_h:
            w.randomize()
        self.weights_ho.randomize()
        self.weights_ih.randomize()
        for b in self.bias_h:
            b.randomize()
        self.bias_o.randomize()

        self.learningRate = 0.1

    def feedForward(self, inputArray):
        """Run one forward pass; return the output layer as a flat list."""
        inputs = Matrix.fromArray(inputArray)

        # first hidden layer
        hidden = Matrix.MatrixMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h[0])
        hidden.map(sigmoid)

        # remaining hidden layers, each fed from the previous activation
        hiddenVals = [hidden]
        for i in range(len(self.weights_h)):
            layer = Matrix.MatrixMultiply(self.weights_h[i], hiddenVals[-1])
            layer.add(self.bias_h[i + 1])
            layer.map(sigmoid)
            hiddenVals.append(layer)

        # output layer
        output = Matrix.MatrixMultiply(self.weights_ho, hiddenVals[-1])
        output.add(self.bias_o)
        output.map(sigmoid)

        return output.toArray()

    def train(self, inputs_array, targets_array):
        """One gradient-descent step for a single (input, target) pair."""
        # ---- forward pass (same maths as feedForward, keeping every
        # intermediate activation for the backward pass) ----
        inputs = Matrix.fromArray(inputs_array)

        hidden = Matrix.MatrixMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h[0])
        hidden.map(sigmoid)

        hiddenVals = [hidden]
        for i in range(len(self.weights_h)):
            layer = Matrix.MatrixMultiply(self.weights_h[i], hiddenVals[-1])
            layer.add(self.bias_h[i + 1])
            layer.map(sigmoid)
            hiddenVals.append(layer)

        output = Matrix.MatrixMultiply(self.weights_ho, hiddenVals[-1])
        output.add(self.bias_o)
        output.map(sigmoid)

        # ---- backward pass ----
        targets = Matrix.fromArray(targets_array)
        outputErrors = Matrix.subtract(targets, output)

        # output gradient = lr * error * sigmoid'(output)
        gradients = Matrix.staticMap(output, dsigmoid)
        gradients.multiply(outputErrors)
        gradients.multiply(self.learningRate)

        # last hidden -> output deltas
        hiddenT = Matrix.transpose(hiddenVals[-1])
        weight_ho_deltas = Matrix.MatrixMultiply(gradients, hiddenT)
        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)

        # walk backwards through the hidden layers, propagating the error
        # through each weight matrix.  NOTE(review): as in the original, the
        # error is propagated through the ALREADY-UPDATED weights — statement
        # order is preserved exactly to keep behavior identical.
        hiddenErrors = [outputErrors]
        weightss = list(self.weights_h) + [self.weights_ho]

        for i in range(len(self.weights_h)):
            weight = weightss[len(weightss) - 1 - i]
            weightT = Matrix.transpose(weight)
            error = Matrix.MatrixMultiply(weightT, hiddenErrors[0])
            hiddenErrors.insert(0, error)

            # gradient at this hidden layer = lr * error * sigmoid'(activation)
            gradient = Matrix.staticMap(hiddenVals[len(hiddenVals) - 1 - i], dsigmoid)
            gradient.multiply(error)
            gradient.multiply(self.learningRate)

            # delta = gradient * (previous layer activation)^T
            previous_layer_t = Matrix.transpose(hiddenVals[len(hiddenVals) - 2 - i])
            delta = Matrix.MatrixMultiply(gradient, previous_layer_t)
            self.weights_h[len(self.weights_h) - 1 - i].add(delta)
            self.bias_h[len(self.bias_h) - 1 - i].add(gradient)

        # finally the input -> first-hidden weights.  weightss[0] is
        # weights_h[0] when there are hidden-to-hidden layers, otherwise
        # weights_ho (single hidden layer case).
        wT = Matrix.transpose(weightss[0])
        e = Matrix.MatrixMultiply(wT, hiddenErrors[0])

        g = Matrix.staticMap(hiddenVals[0], dsigmoid)
        g.multiply(e)
        g.multiply(self.learningRate)

        inputsT = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.MatrixMultiply(g, inputsT)
        self.weights_ih.add(weight_ih_deltas)
        self.bias_h[0].add(g)

if __name__ == '__main__':
    # restored from `if name == 'main':` — markdown stripped the dunder
    # underscores from the pasted code, so the guard never matched
    print("This is a Newral Network Library")
    print("-By CHAITANYA JAIN")
`

Please consider adding it — this is the first time I have ever contributed.
I don't even know the proper way to submit my code, but I have tried my best.

Maik1999 and others added 30 commits February 25, 2018 18:33
Added link to pull request 61
Dear Diary,

I spent the day recording a set of video tutorials about how to do image classification with a simple "toy" neural network. There are lots of problems with this and the goal is purely educational and to have some fun. I hope to improve this example in the future. It could use an interface and some animations, the softmax function, and working with a larger dataset and ml5/deeplearn.js.

yours forever,
The Coding Train (choo choo)
nothing to see here yet!
This here is the start of code I need for my neuro-evolution examples.
So far I have only implemented "copy()" and "mutate()" ok! I still need
to do "crossover()" and implement the GA itself of course. For
reference the live stream is here:

https://www.youtube.com/watch?v=ASnCXW6pPSY
This adds a test for the `Matrix.copy()` function.
The doodle classifier example was missing under the examples list.
Also links to the youtube videos coding these examples are added.
Create a README in the xor example
Added a section where the community can reference their own libraries they've built based on this one.
added community contributions
This is a new example with a lot of great help from @meiamsome and more.
Discussion in this live stream starting here:

https://youtu.be/emjv5tr-m7Q?t=4898
New neuroevolution steering example
Examples added doodleclassifier and Youtube links
Missing semi-colon in matrix.js
Added another Library reference
modified the arguments of the constructor to be self-documenting and added documentation to explain the cloning constructor
modified constructor args to be self documenting
shiffman and others added 11 commits March 15, 2019 10:49
Convolutional Neural Network from scratch in JS. Example is demonstrated on MNIST dataset. 
The uploaded brain.json contains brain with 80% accuracy on test dataset.
The example can be run on browser on :
https://therealyubraj.github.io/CNN_JS/
Click on the load button to load pre-trained brain.
Added my implementation of CNN
@github-pages github-pages bot temporarily deployed to github-pages February 23, 2022 13:23 Inactive
@github-pages github-pages bot temporarily deployed to github-pages June 22, 2022 10:50 Inactive
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.