@jamesloyys
Created May 14, 2018 08:54
import numpy as np

# helper functions the gist assumes (defined as in the replies below)
def sigmoid(t):
    return 1 / (1 + np.exp(-t))

def sigmoid_derivative(p):
    # p is already a sigmoid output, so this is sigma'(t) = p * (1 - p)
    return p * (1 - p)

class NeuralNetwork:
    def __init__(self, x, y):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], 4)
        self.weights2 = np.random.rand(4, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)

    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))

    def backprop(self):
        # application of the chain rule to find the derivative of the loss function
        # with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T, (2 * (self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T, (np.dot(2 * (self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))

        # update the weights with the derivative (slope) of the loss function
        self.weights1 += d_weights1
        self.weights2 += d_weights2
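For anyone tracing the chain-rule comment in backprop, this is the derivation those two lines implement, assuming the sum-of-squares loss implied by the 2*(y - output) factor, writing L1 = sigma(X W1) for self.layer1 and y-hat for self.output, with the circle-dot denoting the elementwise product:

$$\mathrm{Loss} = \sum_i (y_i - \hat{y}_i)^2, \qquad \hat{y} = \sigma(L_1 W_2), \qquad L_1 = \sigma(X W_1)$$

$$\frac{\partial \mathrm{Loss}}{\partial W_2} = -\,L_1^{\top}\bigl[\,2(y - \hat{y}) \odot \sigma'(L_1 W_2)\,\bigr]$$

$$\frac{\partial \mathrm{Loss}}{\partial W_1} = -\,X^{\top}\Bigl[\bigl(2(y - \hat{y}) \odot \sigma'(L_1 W_2)\bigr) W_2^{\top} \odot \sigma'(X W_1)\Bigr]$$

The code computes these without the leading minus sign and then adds them to the weights, so weights += d_weights is one step of gradient descent on the loss with an implicit learning rate of 1. sigmoid_derivative(self.output) stands in for sigma'(L1 W2) because sigma'(t) = sigma(t)(1 - sigma(t)) and self.output is already sigma(L1 W2).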
@MustafaSobhy

Could you tell me how to add inputs?

@madhurisuthar

Could you tell me how to add inputs?

Hi Mustafa,
Here is the code I wrote; it lets you give inputs, train the network, and keep track of the loss.
Best,
Madhuri

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tuesday Oct 2, 2018
@author: Madhuri Suthar, PhD Candidate in Electrical and Computer Engineering, UCLA
"""

# Imports
import numpy as np 
      
# Each row is a training example, each column is a feature  [X1, X2, X3]
X = np.array(([0,0,1],[0,1,1],[1,0,1],[1,1,1]), dtype=float)
y = np.array(([0],[1],[1],[0]), dtype=float)

# Define useful functions    

# Activation function
def sigmoid(t):
    return 1/(1+np.exp(-t))

# Derivative of sigmoid
def sigmoid_derivative(p):
    return p * (1 - p)

# Class definition
class NeuralNetwork:
    def __init__(self, x, y):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], 4)  # considering we have 4 nodes in the hidden layer
        self.weights2 = np.random.rand(4, 1)
        self.y = y
        self.output = np.zeros(y.shape)
        
    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.layer2 = sigmoid(np.dot(self.layer1, self.weights2))
        return self.layer2
        
    def backprop(self):
        d_weights2 = np.dot(self.layer1.T, 2*(self.y -self.output)*sigmoid_derivative(self.output))
        d_weights1 = np.dot(self.input.T, np.dot(2*(self.y -self.output)*sigmoid_derivative(self.output), self.weights2.T)*sigmoid_derivative(self.layer1))
    
        self.weights1 += d_weights1
        self.weights2 += d_weights2

    def train(self, X, y):
        # note: X and y are unused here; the network reads self.input and self.y
        self.output = self.feedforward()
        self.backprop()
        

NN = NeuralNetwork(X, y)
for i in range(1500):  # trains the NN 1,500 times
    if i % 100 == 0:
        print("for iteration # " + str(i) + "\n")
        print("Input : \n" + str(X))
        print("Actual Output: \n" + str(y))
        print("Predicted Output: \n" + str(NN.feedforward()))
        print("Loss: \n" + str(np.mean(np.square(y - NN.feedforward()))))  # mean squared loss
        print("\n")

    NN.train(X, y)
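A hedged editorial aside: this loop, like the original gist, applies the raw gradient, i.e. an implicit learning rate of 1, which happens to work on this tiny dataset but can overshoot or saturate on larger ones. A compact restatement of the same training loop with an explicit rate (the lr value, the fixed seed, and the @ operator are choices of this sketch, not of the original):

import numpy as np

def sigmoid(t):
    return 1 / (1 + np.exp(-t))

def sigmoid_derivative(p):
    return p * (1 - p)

X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

np.random.seed(0)
w1 = np.random.rand(3, 4)
w2 = np.random.rand(4, 1)
lr = 0.5  # explicit learning rate; the gist's update is equivalent to lr = 1

for i in range(1500):
    layer1 = sigmoid(X @ w1)
    output = sigmoid(layer1 @ w2)
    delta2 = 2 * (y - output) * sigmoid_derivative(output)   # output-layer error signal
    d_w2 = layer1.T @ delta2
    d_w1 = X.T @ (delta2 @ w2.T * sigmoid_derivative(layer1))
    w2 += lr * d_w2
    w1 += lr * d_w1

print(np.round(output, 3))  # should approach [[0], [1], [1], [0]]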

@disser2

disser2 commented Oct 5, 2018

@madhurisuthar
In the backprop method you write "self.layer1.T". What is the "T" referring to?

@Sebastian-Nielsen
It is the transpose of the matrix; see here: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.T.html#numpy.ndarray.T
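A concrete two-line illustration:

import numpy as np

A = np.array([[1, 2, 3],
              [4, 5, 6]])
print(A.T)        # rows become columns: [[1 4] [2 5] [3 6]]
print(A.T.shape)  # (3, 2): the transpose of a (2, 3) array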

@NehaBari

NehaBari commented Oct 8, 2018

(quotes Madhuri's reply and training code above)

Thanks!

@ChristianValdemar

Thank you so much for sharing your work.
Do you know if it is a special case when we only have one hidden node?

I wanted to make a very small example that one could do step by step by hand. So I was thinking: one (maybe two) input node, one hidden node, and one output node. However, doing that, the output either ranges from 0 to 0.5 or from 0.5 to 1 after training the network. Here is a simple classification example, based on your code:

import numpy as np

def sigmoid(x):
    return 1.0/(1+ np.exp(-x))

def sigmoid_derivative(x):
    return x * (1.0 - x)

class NeuralNetwork:
    def __init__(self, x, y):
        self.input      = x
        self.weights1   = np.array([[-1.0]]) #np.random.rand(self.input.shape[1],1) 
        self.weights2   = np.array([[-1.0]]) #np.random.rand(1,1)                 
        self.y          = y
        self.output     = np.zeros(self.y.shape)

    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))

    def backprop(self):
        # application of the chain rule to find derivative of the loss function with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T,  (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))

        # update the weights with the derivative (slope) of the loss function
        self.weights1 += d_weights1
        self.weights2 += d_weights2


if __name__ == "__main__":
    X = np.array([[0.0],
                  [0.2],
                  [0.7],
                  [0.9]])
    y = np.array([[0],[0],[1],[1]])
    nn = NeuralNetwork(X,y)

    for i in range(15000):
        nn.feedforward()
        nn.backprop()

    print(nn.output)

This example returns [[2.30303470e-06], [5.77865871e-02], [4.96063404e-01], [4.99525486e-01]], where the expected outcome would be that the last two probabilities are close to one.

I also tried with two input nodes, but the problem is still there. So as I started to ask, do you or anyone else know if there is some special case, when we only have one hidden node?

Thank you in advance.
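An editorial note on the likely cause, hedged: this network has no bias terms, so with a single hidden node the output is sigmoid(w2 * h) with h = sigmoid(w1 * x) always strictly between 0 and 1. The output pre-activation w2 * h therefore never changes sign, so the output stays on one side of sigmoid(0) = 0.5: in (0.5, 1) when w2 > 0 and in (0, 0.5) when w2 < 0, which matches the ranges observed above. A quick check:

import numpy as np

sigmoid = lambda t: 1 / (1 + np.exp(-t))
h = sigmoid(np.linspace(-5, 5, 5))  # hidden activations: always in (0, 1)
print(sigmoid(3 * h))               # every output > 0.5, whatever the input was
print(sigmoid(-3 * h))              # every output < 0.5

The constant third input column in the XOR example above plays a bias-like role for the hidden layer, which is one reason the larger example does not show this restriction.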

@tubabal

tubabal commented Oct 29, 2018

Dear all, can anyone share the numpy module with me?

@xwanyiri

xwanyiri commented Nov 7, 2018

Dear all, can anyone share the numpy module with me?

Install it using pip:
pip install numpy
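And a two-line check that the install worked:

import numpy
print(numpy.__version__)  # prints the installed version if numpy imported correctly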

@nesrinewagaa

Hello, I want to create a neural network that uses a threshold function instead of the sigmoid function. In this work I have just one hidden layer, and I set all the weights equal to 0.
This schema gives an idea of my work:

(schema image: captureneraul)

Python code:

import numpy as np

x0 = 1
x1 = 0.8
x2 = 0.2
X = np.array(([x0, x1, x2]), dtype=float)
yd = np.array(([0.4]), dtype=float)
w1 = w2 = w3 = w4 = wb1 = wb2 = w5 = w6 = wb3 = 0
W = np.array([[w1], [w2], [w3], [w4], [wb1], [wb2], [w5], [w6], [wb3]])

# Class definition
class NeuralNetwork:
    def __init__(self, X, yd):
        self.X = X
        self.W = W
        self.yd = yd
        self.z = np.zeros(yd.shape)

    def feedforward(self):
        self.y1 = wb1 + w1 * x1 + w2 * x2
        self.y2 = wb2 + w3 * x1 + w4 * x2
        self.z = wb3 + w5 * self.y1 + w6 * self.y2
        return self.z

    def backprop(self):
        dw1 = w5 * x1
        dw2 = w5 * x2
        dw3 = w6 * x1
        dw4 = w6 * x2
        dwb1 = w5
        dwb2 = w6
        dw5 = wb1 + w1 * x1 + w2 * x2
        dw6 = wb2 + w3 * x1 + w4 * x2
        dwb3 = 1

        F = np.array([[dw1, dw2, dw3, dw4, dwb1, dwb2, dw5, dw6, dwb3]])
        FT = np.transpose(F)
        cte = -FT.dot(F.dot(FT) ** (-1)) * 10 * (NN.feedforward() - yd)
        W = W + 0.1 * cte

    def train(self, X, yd):
        self.output = self.feedforward()
        self.backprop()

NN = NeuralNetwork(X, yd)
for i in range(100):  # trains the NN 100 times
    # if i % 100 == 0:
    print("loss: \n" + str(NN.feedforward() - yd))
    print("\n")

    NN.train(X, yd)
My problem: I don't obtain a good result; the loss value is always stable at -0.4.
Can anyone help, please?
Thanks
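An editorial note, offered tentatively since the pasted code may differ from what was actually run: feedforward reads the module-level scalars w1 ... wb3, which stay 0 forever, so z is always 0 and the loss is always 0 - 0.4 = -0.4. backprop writes to a local name W (as pasted, that line would even raise UnboundLocalError), so the instance weights never change either. A sketch of the two methods rewritten to read and write self.W, as drop-in replacements for the class above (same logic, same numpy import):

    def feedforward(self):
        # read the current weights from the instance, not the stale module globals
        w1, w2, w3, w4, wb1, wb2, w5, w6, wb3 = self.W.ravel()
        self.y1 = wb1 + w1 * x1 + w2 * x2
        self.y2 = wb2 + w3 * x1 + w4 * x2
        self.z = wb3 + w5 * self.y1 + w6 * self.y2
        return self.z

    def backprop(self):
        w1, w2, w3, w4, wb1, wb2, w5, w6, wb3 = self.W.ravel()
        F = np.array([[w5 * x1, w5 * x2, w6 * x1, w6 * x2, w5, w6,
                       wb1 + w1 * x1 + w2 * x2, wb2 + w3 * x1 + w4 * x2, 1]])
        cte = -F.T.dot(F.dot(F.T) ** (-1)) * 10 * (self.feedforward() - self.yd)
        self.W = self.W + 0.1 * cte  # write back to the instance, not to a local name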

@Bornlex

Bornlex commented Dec 12, 2018

Hello guys,

I started from the article here: article

and I wrote some code.
However, it does not work and I have tried so many things already that I am quite lost.
Can someone help please?

Here is the code

# coding: utf-8

from mnist import MNIST
import numpy as np
import math
import os
import pdb


DATASETS_PREFIX    = '../Datasets/MNIST'
mndata             = MNIST(DATASETS_PREFIX)
TRAINING_IMAGES, TRAINING_LABELS  = mndata.load_training()
TESTING_IMAGES , TESTING_LABELS   = mndata.load_testing()

### UTILS

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def d_sigmoid(x):
    return x.T * (1 - x)
    #return np.dot(x.T, 1.0 - x)

def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()

#def d_softmax(x):
#    tmp = x.reshape((-1,1))
#    return np.diag(x) - np.dot(tmp, tmp.T)

def d_softmax(x):
    #This function has not yet been tested.
    return x.T * (1 - x)

def tanh(x):
    return np.tanh(x)

def d_tanh(x):
    return 1 - x.T * x

def normalize(image):
    return image / (255.0 * 0.99 + 0.01)

### !UTILS

class NeuralNetwork(object):
    """
    This is a 3-layer neural network (1 hidden layer).
    @_input   : input layer
    @_weights1: weights between input layer and hidden layer  (matrix shape (input.shape[1], 4))
    @_weights2: weights between hidden layer and output layer (matrix shape (4, 1))
    @_y       : output
    @_output  : computed output
    @_alpha   : learning rate
    """
    def __init__(self, xshape, yshape):
        self._neurones_nb = 20
        self._input       = None
        self._weights1    = np.random.randn(xshape, self._neurones_nb)
        self._weights2    = np.random.randn(self._neurones_nb, yshape)
        self._y           = np.mat(np.zeros(yshape))
        self._output      = np.mat(np.zeros(yshape))
        self._alpha1      = 0.01
        self._alpha2      = 0.01
        self._function    = sigmoid
        self._derivative  = d_sigmoid
        self._epoch       = 1

    def Train(self, xs, ys):
        for j in range(self._epoch):
            for i in range(len(xs)):
                self._input = normalize(np.mat(xs[i]))
                self._y[0, ys[i]] = 1
                self.feedforward()
                self.backpropagation()
                self._y[0, ys[i]] = 0

    def Predict(self, image):
        self._input = normalize(image)
        out = self.feedforward()
        return out

    def feedforward(self):
        self._layer1 = self._function(np.dot(self._input, self._weights1))
        self._output = self._function(np.dot(self._layer1, self._weights2))
        return self._output

    def backpropagation(self):
        d_weights2 = np.dot(
            self._layer1.T,
            2 * (self._y - self._output) * self._derivative(self._output)
        )
        d_weights1 = np.dot(
            self._input.T,
            np.dot(
                2 * (self._y - self._output) * self._derivative(self._output),
                self._weights2.T
            ) * self._derivative(self._layer1)
        )
        self._weights1 += self._alpha1 * d_weights1
        self._weights2 += self._alpha2 * d_weights2

if __name__ == '__main__':
    neural_network = NeuralNetwork(len(TRAINING_IMAGES[0]), 10)
    print('* training neural network')
    neural_network.Train(TRAINING_IMAGES, TRAINING_LABELS)
    print('* testing neural network')
    count = 0
    for i in range(len(TESTING_IMAGES)):
        image       = np.mat(TESTING_IMAGES[i])
        expected    = TESTING_LABELS[i]
        prediction  = neural_network.Predict(image)
        if i % 100 == 0: print(expected, prediction)
    #print(f'* results: {count} / {len(TESTING_IMAGES)}')

Thank you.
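An editorial observation, hedged: with np.mat, the * operator is matrix multiplication, so d_sigmoid's x.T * (1 - x) computes an outer product instead of the elementwise derivative x(1 - x) used elsewhere in this thread (d_tanh has the same issue). A quick shape check under that assumption:

import numpy as np

x = np.mat([[0.2, 0.7, 0.9]])         # a (1, 3) activation row, as in the network above
print((x.T * (1 - x)).shape)          # (3, 3): outer product, not the derivative
print(np.multiply(x, 1 - x).shape)    # (1, 3): the elementwise sigmoid derivative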

@Nex4rius

If I change this

self.weights1= np.random.rand(self.input.shape[1],4) # considering we have 4 nodes in the hidden layer
self.weights2 = np.random.rand(4,1)

to this

self.weights1= np.random.rand(self.input.shape[1],100) # considering we have 100 nodes in the hidden layer
self.weights2 = np.random.rand(100,1)

then I'm only getting 1 in the output everywhere from the start (without training).

Why would adding more hidden nodes cause this?

@AndyM10

AndyM10 commented Dec 9, 2019

So how would you modify this design for 2 hidden layers instead of just the 1?

@SibuleleMboyi

If I change this

self.weights1= np.random.rand(self.input.shape[1],4) # considering we have 4 nodes in the hidden layer
self.weights2 = np.random.rand(4,1)

to this

self.weights1= np.random.rand(self.input.shape[1],100) # considering we have 100 nodes in the hidden layer
self.weights2 = np.random.rand(100,1)

then I'm only getting 1 in the output everywhere from the start (without training).

Why would adding more hidden nodes cause this?

I am also stuck here, did you figure it out?
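An editorial note on the likely cause, hedged: np.random.rand draws uniformly from [0, 1), so every initial weight is positive. With 100 hidden nodes the output pre-activation is a sum of ~100 positive terms, sigmoid saturates at 1 from the very first pass, and sigmoid_derivative(output) = output * (1 - output) is then ~0, so the gradients vanish and training stalls at an all-ones output. Centering and shrinking the initial weights avoids this; a minimal sketch (the 0.1 scale is an assumption of this sketch, not part of the gist):

import numpy as np

sigmoid = lambda t: 1 / (1 + np.exp(-t))
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=float)

# saturating init: all weights positive in [0, 1)
w1, w2 = np.random.rand(3, 100), np.random.rand(100, 1)
print(sigmoid(sigmoid(X @ w1) @ w2).ravel())  # ~[1, 1, 1, 1] before any training

# centered, scaled init: outputs start near 0.5 and gradients survive
w1 = (np.random.rand(3, 100) - 0.5) * 0.1
w2 = (np.random.rand(100, 1) - 0.5) * 0.1
print(sigmoid(sigmoid(X @ w1) @ w2).ravel())  # ~[0.5, 0.5, 0.5, 0.5]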

@djamelherbadji

I want the Python code for a neural network where the input layer is composed of two neurons, the hidden layer consists of two sub-layers (20 neurons in the first sub-layer and 10 in the second), and the output layer is composed of 5 neurons.

@CoolCat467

This system will only work if the network has exactly 2 layers of weights, no more; that is why your tests with more layers fail. The backpropagation function only modifies the weights of the last two layers.

@ClackHack

Does anyone know how to use more than one hidden layer? I have most of it figured out, but I am mostly stuck on the backpropagation.

@ClackHack

Does anyone know how to use more than one hidden layer? I have most of it figured out, but I am mostly stuck on the backpropagation.

Sorta figured it out, but how do I backpropagate with variable sizes?
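An editorial sketch in response, hedged: the gist's two-matrix update generalizes if you store the weights in a list and walk it backwards, reusing each layer's error signal to build the previous one. A minimal version for an arbitrary list of layer sizes, keeping the gist's sigmoid activation and squared loss (the layer_sizes parameter, lr, and the class name are illustrative, not from the original):

import numpy as np

def sigmoid(t):
    return 1 / (1 + np.exp(-t))

def sigmoid_derivative(p):  # p is already a sigmoid output
    return p * (1 - p)

class DeepNetwork:
    def __init__(self, layer_sizes, lr=0.5):
        # layer_sizes, e.g. [3, 4, 4, 1]: input, hidden layers..., output
        self.weights = [(np.random.rand(a, b) - 0.5)
                        for a, b in zip(layer_sizes[:-1], layer_sizes[1:])]
        self.lr = lr

    def feedforward(self, x):
        self.activations = [x]
        for W in self.weights:
            self.activations.append(sigmoid(self.activations[-1] @ W))
        return self.activations[-1]

    def backprop(self, y):
        # error signal at the output layer, matching the gist's 2*(y - output) * sigma'
        delta = 2 * (y - self.activations[-1]) * sigmoid_derivative(self.activations[-1])
        for i in reversed(range(len(self.weights))):
            grad = self.activations[i].T @ delta
            if i > 0:  # propagate delta to the previous layer before updating this one
                delta = (delta @ self.weights[i].T) * sigmoid_derivative(self.activations[i])
            self.weights[i] += self.lr * grad

    def train(self, x, y):
        self.feedforward(x)
        self.backprop(y)

X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)
net = DeepNetwork([3, 4, 4, 1])
for _ in range(5000):
    net.train(X, y)
print(np.round(net.feedforward(X), 3))  # should approach [[0], [1], [1], [0]];
                                        # an unlucky seed may need more iterations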

@playermarker

Could you tell me how to add inputs?

(quotes Madhuri's reply and training code above)

Hi, I have an error after running this code; it says "unsupported operand type(s) for -: 'float' and 'NoneType'". How can I solve it?

@Mrezakhodashenas

I have the same issue.
Any tips, guys?
It seems "NN.feedforward()" is None!
Thanks in advance.
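An editorial note, hedged: this error usually means feedforward returned None. Madhuri's version above ends with return self.layer2, but the feedforward in the original gist at the top of the page has no return statement, so NN.feedforward() evaluates to None and y - NN.feedforward() raises exactly this TypeError. If the two versions were mixed, the likely fix is a one-line return, as a drop-in for the class above (assuming its numpy import and sigmoid helper):

    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))
        return self.output  # without this line, NN.feedforward() is None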
