@tomahawk-pilot
Created November 28, 2017 18:19
backpropagation — a minimal one-hidden-layer NumPy network trained on XOR
import numpy as np


def sigmoid(x, derivative=False):
    # With derivative=True, x is expected to already be a sigmoid output,
    # so the derivative is simply x * (1 - x).
    if derivative:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
np.random.seed(1)

alpha = .2         # learning rate
num_hidden = 2     # number of hidden units

# XOR truth table: inputs and targets
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])
y = np.array([[0, 1, 1, 0]]).T

# Weights drawn from [-1, 1); the "+ 1" row holds the bias weights
hidden_weights = 2 * np.random.random((X.shape[1] + 1, num_hidden)) - 1
output_weights = 2 * np.random.random((num_hidden + 1, y.shape[1])) - 1
num_iterations = 1000
for i in range(num_iterations):
    # Forward pass: prepend a bias column of ones at each layer;
    # the hidden layer is sigmoid, the output layer is linear.
    input_layer_outputs = np.hstack((np.ones((X.shape[0], 1)), X))
    hidden_layer_outputs = np.hstack((np.ones((X.shape[0], 1)), sigmoid(np.dot(input_layer_outputs, hidden_weights))))
    output_layer_outputs = np.dot(hidden_layer_outputs, output_weights)

    # Backward pass: output error, then error propagated back to the hidden
    # layer (skipping the bias column) and scaled by the sigmoid derivative.
    output_error = output_layer_outputs - y
    hidden_error = hidden_layer_outputs[:, 1:] * (1 - hidden_layer_outputs[:, 1:]) * np.dot(output_error, output_weights.T[:, 1:])

    # Per-example partial derivatives, then gradients averaged over the batch
    hidden_pd = input_layer_outputs[:, :, np.newaxis] * hidden_error[:, np.newaxis, :]
    output_pd = hidden_layer_outputs[:, :, np.newaxis] * output_error[:, np.newaxis, :]
    total_hidden_gradient = np.average(hidden_pd, axis=0)
    total_output_gradient = np.average(output_pd, axis=0)

    # Gradient-descent update
    hidden_weights -= alpha * total_hidden_gradient
    output_weights -= alpha * total_output_gradient

print("Output After Training: \n{}".format(output_layer_outputs))