neural network from scratch
import numpy as np

def sigmoid(z):
    # Apply sigmoid activation function to a scalar, vector, or matrix
    return 1/(1+np.exp(-z))

def sigmoidPrime(z):
    # Gradient (derivative) of the sigmoid function
    return np.exp(-z)/((1+np.exp(-z))**2)
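
# Sanity-check sketch: sigmoidPrime can be verified against a central
# finite-difference approximation of sigmoid. The test points and the step
# size eps below are arbitrary choices.
z_check = np.array([-1.0, 0.0, 1.0])
eps = 1e-6
numeric = (sigmoid(z_check + eps) - sigmoid(z_check - eps)) / (2 * eps)
assert np.allclose(numeric, sigmoidPrime(z_check))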

# Training data: inputs X and target test scores y
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)

# Normalize to the range [0, 1]
X = X/np.amax(X, axis=0)
y = y/100  # max test score is 100

# Layer sizes: 2 inputs, 3 hidden units, 1 output
inputLayerSize = 2
outputLayerSize = 1
hiddenLayerSize = 3

# Random initial weights for the input-to-hidden layer
W1 = np.random.randn(inputLayerSize, hiddenLayerSize)
# Random initial weights for the hidden-to-output layer
W2 = np.random.randn(hiddenLayerSize, outputLayerSize)

# Forward propagation
z2 = np.dot(X, W1)    # hidden layer input
a2 = sigmoid(z2)      # hidden layer activation
z3 = np.dot(a2, W2)   # output layer input
yHat = sigmoid(z3)    # output layer activation (predicted scores)
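
# Assumed cost consistent with the gradients computed below:
# squared-error cost J = 0.5 * sum((y - yHat)**2)
J = 0.5 * np.sum((y - yHat)**2)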

# Backpropagation: gradients of the cost with respect to the weights
delta3 = np.multiply(-(y-yHat), sigmoidPrime(z3))  # output layer error
dJdW2 = np.dot(a2.T, delta3)                       # gradient w.r.t. W2
delta2 = np.dot(delta3, W2.T)*sigmoidPrime(z2)     # error propagated back to hidden layer
dJdW1 = np.dot(X.T, delta2)                        # gradient w.r.t. W1
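
# Usage sketch: one way to use these gradients is plain gradient descent,
# repeating the forward and backward passes above and stepping the weights
# against the gradients. The learning rate and iteration count here are
# arbitrary choices.
learningRate = 1.0
for i in range(1000):
    z2 = np.dot(X, W1)
    a2 = sigmoid(z2)
    z3 = np.dot(a2, W2)
    yHat = sigmoid(z3)
    delta3 = np.multiply(-(y - yHat), sigmoidPrime(z3))
    dJdW2 = np.dot(a2.T, delta3)
    delta2 = np.dot(delta3, W2.T) * sigmoidPrime(z2)
    dJdW1 = np.dot(X.T, delta2)
    W1 = W1 - learningRate * dJdW1  # step both weight matrices downhill
    W2 = W2 - learningRate * dJdW2
print(y)     # target scores
print(yHat)  # predicted scores after training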