@oskin1 · Created January 20, 2019 20:33
import numpy as np
# sigmoid activation function
def nonlin(x):
    return 1 / (1 + np.exp(-x))
# sigmoid derivative (expects a sigmoid output as its argument)
def nonlin_deriv(x):
    return x * (1 - x)
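# Note (added): this works because sigma'(z) = sigma(z) * (1 - sigma(z)).
# Every call site below passes a layer activation, which is already a sigmoid
# output, so the derivative is simply x * (1 - x) in terms of the activation x.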
# input: one training example per row (the constant third column acts as a bias)
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])
# output: XOR of the first two input columns
Y = np.array([[0],
              [1],
              [1],
              [0]])
np.random.seed(0)
# synapse (weight) initialization: random values with mean 0 in [-1, 1)
syn0 = 2 * np.random.random((3, 4)) - 1  # input -> hidden layer weights (3x4)
syn1 = 2 * np.random.random((4, 1)) - 1  # hidden -> output layer weights (4x1)
# training loop
for j in range(80000):
    # forward pass
    l0 = X                         # input layer
    l1 = nonlin(np.dot(l0, syn0))  # hidden layer
    l2 = nonlin(np.dot(l1, syn1))  # output layer
    # backpropagate the error using the chain rule
    l2_error = Y - l2
    if (j % 10000) == 0:
        print(f'Error: {np.mean(np.abs(l2_error))}')
    l2_delta = l2_error * nonlin_deriv(l2)
    l1_error = l2_delta.dot(syn1.T)
    l1_delta = l1_error * nonlin_deriv(l1)
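    # Note (added): by the chain rule on the squared error E = 0.5 * ||Y - l2||^2,
    # l2_delta is the negative gradient of E w.r.t. the output pre-activation,
    # l1_error distributes that delta back through syn1, and l1_delta applies
    # the sigmoid derivative at the hidden layer. The minus sign is absorbed by
    # defining the error as Y - l2, which is why the updates below use +=.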
    # update weights
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)
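# Note (added): the += updates above are plain gradient descent with an implicit
# learning rate of 1; scaling the step, e.g. syn1 += 0.5 * l1.T.dot(l2_delta),
# would trade convergence speed for stability.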
print('Output after training:')
print(l2)
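# --- Usage sketch (added, not part of the original gist) ---
# A minimal way to query the trained network on new inputs: re-run the forward
# pass through the learned weights and threshold the output at 0.5. The helper
# name `predict` is illustrative, not from the source.
def predict(x):
    return nonlin(np.dot(nonlin(np.dot(x, syn0)), syn1))

print('Rounded predictions (should match Y for a converged run):')
print(np.round(predict(X)))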