# nn2.py
# Import dependencies
import numpy
from sklearn import preprocessing

# Make random numbers predictable
numpy.random.seed(1)

# Define the size of the layers, as well as the learning rate alpha and the max error
inputLayerSize = 2
hiddenLayerSize = 3
outputLayerSize = 1
alpha = 0.01
maxError = 0.001
# Define our activation function
# In this case, we use the sigmoid function
def sigmoid(x):
    return 1 / (1 + numpy.exp(-x))

# Note: this expects x to already be a sigmoid *output*,
# since d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z))
def sigmoid_derivative(x):
    return x * (1 - x)
# Define the cost function: binary cross-entropy
def calculateError(Y, Y_predicted):
    return -numpy.mean(Y * numpy.log(Y_predicted) +
                       (1 - Y) * numpy.log(1 - Y_predicted))
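
# Illustrative sanity check of the loss (example values only): a confident
# correct prediction gives a small loss, a confident wrong one a large loss:
#   calculateError(numpy.array([[1.0]]), numpy.array([[0.9]]))  # ~0.105
#   calculateError(numpy.array([[1.0]]), numpy.array([[0.1]]))  # ~2.303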
# Set inputs
# Each row is (x1, x2)
X = numpy.array([
    [7, 4.7],
    [6.3, 6],
    [6.9, 4.9],
    [6.4, 5.3],
    [5.8, 5.1],
    [5.5, 4],
    [7.1, 5.9],
    [6.3, 5.6],
    [6.4, 4.5],
    [7.7, 6.7]
])
# Normalize the inputs
#X = preprocessing.scale(X)
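# For reference, preprocessing.scale standardizes each column to zero mean and
# unit variance. A plain-numpy equivalent (a sketch, should you want to drop
# the sklearn dependency) would be:
#   X = (X - X.mean(axis=0)) / X.std(axis=0)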
# Set goals
# Each row is (y1)
Y = numpy.array([
    [0],
    [1],
    [0],
    [1],
    [1],
    [0],
    [0],
    [1],
    [0],
    [1]
])
# Randomly initialize our weights with mean 0 (uniform in [-1, 1))
weights_1 = 2 * numpy.random.random((inputLayerSize, hiddenLayerSize)) - 1
weights_2 = 2 * numpy.random.random((hiddenLayerSize, outputLayerSize)) - 1
# Randomly initialize our biases with mean 0
bias_1 = 2 * numpy.random.random(hiddenLayerSize) - 1
bias_2 = 2 * numpy.random.random(outputLayerSize) - 1
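
# Illustrative shape check (added for clarity): with the sizes defined above,
# weights_1 has shape (2, 3), weights_2 has shape (3, 1), bias_1 has shape (3,)
# and bias_2 has shape (1,), so X of shape (10, 2) maps to a (10, 3) hidden
# activation and a (10, 1) output.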
# Train for at most 100,000 epochs
for i in range(100000):
    # Feed forward through layers 0, 1, and 2
    layer_0 = X
    layer_1 = sigmoid(numpy.dot(layer_0, weights_1) + bias_1)
    layer_2 = sigmoid(numpy.dot(layer_1, weights_2) + bias_2)
    # How much did we miss the target value?
    layer_2_error = layer_2 - Y
    # For a sigmoid output trained with cross-entropy, the gradient with
    # respect to the output pre-activation simplifies to (prediction - target),
    # so no sigmoid_derivative factor is needed here.
    layer_2_delta = layer_2_error
    # How much did each layer_1 value contribute to the layer_2 error
    # (according to the weights)?
    layer_1_error = layer_2_delta.dot(weights_2.T)
    # Scale by the slope of the sigmoid at layer_1's activations
    layer_1_delta = layer_1_error * sigmoid_derivative(layer_1)
    # Update the weights
    weights_2 -= alpha * layer_1.T.dot(layer_2_delta)
    weights_1 -= alpha * layer_0.T.dot(layer_1_delta)
    # Update the biases
    bias_2 -= alpha * numpy.sum(layer_2_delta, axis=0)
    bias_1 -= alpha * numpy.sum(layer_1_delta, axis=0)
    # Print the error every 1,000 iterations to show that we are improving
    if (i % 1000) == 0:
        print("Error after " + str(i) + " iterations: " + str(calculateError(Y, layer_2)))
    # Exit if the error is less than maxError
    if calculateError(Y, layer_2) < maxError:
        print("Goal reached after " + str(i) + " iterations: " +
              str(calculateError(Y, layer_2)) + " is smaller than the goal of " + str(maxError))
        break
# Show results
print("")
print("Weights between Input Layer -> Hidden Layer")
print(weights_1)
print("")
print("Bias of Hidden Layer")
print(bias_1)
print("")
print("Weights between Hidden Layer -> Output Layer")
print(weights_2)
print("")
print("Bias of Output Layer")
print(bias_2)
print("")
print("Computed probabilities for SALE (rounded to 3 decimals)")
print(numpy.around(layer_2, decimals=3))
print("")
print("Real probabilities for SALE")
print(Y)
print("")
print("Final Error")
print(calculateError(Y, layer_2))
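
# As an illustration (not part of the original script), the trained weights can
# be reused for new data by repeating the same forward pass. The sample point
# below is made up purely to demonstrate the call.
def predict(x):
    hidden = sigmoid(numpy.dot(x, weights_1) + bias_1)
    return sigmoid(numpy.dot(hidden, weights_2) + bias_2)

print("")
print("Predicted probability for a hypothetical new sample [6.5, 5.5]:")
print(predict(numpy.array([6.5, 5.5])))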