A two-layer neural network written in Python, which trains itself to solve a variation of the XOR problem. Forked from miloharper/main.py.
from autograd import elementwise_grad
from autograd.numpy import exp, array, random, dot, reshape

# A lambda cannot contain `return`, so sigmoid is a plain module-level function.
# The imports come from autograd.numpy so that autograd can trace through exp.
def sigmoid(x):
    return 1 / (1 + exp(-x))

# autograd's grad only handles scalar outputs; elementwise_grad evaluates the
# derivative independently at every element, so it can be applied to arrays.
sigmoid_deriv = elementwise_grad(sigmoid)

class NeuronLayer():
    def __init__(self, neuron_n, inputs_n):
        # Random weights in [-1, 1), one column per neuron.
        self.weights = 2 * random.random((inputs_n, neuron_n)) - 1

class NeuralNetwork():
    def __init__(self, layer1, layer2):
        self.layer1 = layer1
        self.layer2 = layer2
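    # Training uses plain backpropagation: each layer's delta is its error
    # multiplied by the sigmoid derivative at that layer's pre-activation input.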
    def train(self, t_inputs, t_outputs, iteration_n):
        for iteration in range(iteration_n):  # xrange is Python 2 only
            # Forward pass through the neural network.
            op_layer1, op_layer2 = self.calculate_output(t_inputs)
            # Calculate the error for layer 2
            # (the difference between the desired output and the predicted output).
            layer2_error = t_outputs - op_layer2
            # sigmoid_deriv expects the pre-activation input (the dot product),
            # not the activation itself, so the dot products are recomputed here.
            layer2_delta = layer2_error * sigmoid_deriv(dot(op_layer1, self.layer2.weights))
            # Calculate the error for layer 1
            # (by looking at the weights in layer 2, we can determine by how much
            # layer 1 contributed to the error in layer 2).
            layer1_error = layer2_delta.dot(self.layer2.weights.T)
            layer1_delta = layer1_error * sigmoid_deriv(dot(t_inputs, self.layer1.weights))
            # Calculate how much to adjust the weights by.
            layer1_adjustment = t_inputs.T.dot(layer1_delta)
            layer2_adjustment = op_layer1.T.dot(layer2_delta)
            # Adjust the weights.
            self.layer1.weights += layer1_adjustment
            self.layer2.weights += layer2_adjustment
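    # Note: the updates above apply the full gradient step, i.e. an implicit
    # learning rate of 1.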
    # The neural network thinks: forward-propagate the inputs through both layers.
    def calculate_output(self, inputs):
        ot_layer1 = sigmoid(dot(inputs, self.layer1.weights))
        ot_layer2 = sigmoid(dot(ot_layer1, self.layer2.weights))
        return ot_layer1, ot_layer2
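    # Shapes: with n examples, inputs is (n, 3), ot_layer1 is (n, 4) and
    # ot_layer2 is (n, 1); a single (3,) example yields (4,) and (1,) outputs.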
    # The neural network prints its weights.
    def print_weights(self):
        print(" Layer 1 (4 neurons, each with 3 inputs): \n")
        print(self.layer1.weights, '\n')
        print(" Layer 2 (1 neuron, with 4 inputs): \n")
        print(self.layer2.weights, '\n')
if __name__ == "__main__":
    # Seed the random number generator so every run starts from the same weights.
    random.seed(1)
    layer1 = NeuronLayer(4, 3)  # 4 neurons, each with 3 inputs
    layer2 = NeuronLayer(1, 4)  # a single neuron with 4 inputs
    # Combine the layers to create a neural network.
    neural_network = NeuralNetwork(layer1, layer2)
    neural_network.print_weights()  # show the random starting weights
    # The training set. We have 7 examples, each consisting of 3 input values
    # and 1 output value, so the flat list is reshaped to 7 rows of 3.
    inputs = reshape(array([0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0]), (-1, 3))
    outputs = array([[0, 1, 1, 1, 1, 0, 0]]).T
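    # The pattern behind the data: the output is the XOR of the first two
    # inputs, and the third input is irrelevant.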
    # Train the neural network using the training set.
    iteration_n = 60000
    neural_network.train(inputs, outputs, iteration_n)
    neural_network.print_weights()
    # Test the neural network with a new situation.
    print("Stage 3) Considering a new situation [1, 1, 0] -> ?: ")
    hidden_state, output = neural_network.calculate_output(array([1, 1, 0]))
    print(output)
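    # 1 XOR 1 = 0, so a trained network should print a value close to 0 here.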