import numpy as np
import matplotlib.pyplot as plt

def nn(X, y, hiddenLayerSize=10, learningRate=0.01, epochs=100, debug=False):
    m = X.shape[1]            # number of input features
    outputSize = y.shape[1]   # number of output units

    # Make our model: a single hidden layer with randomly initialized weights
    model = dict(
        w0=np.random.randn(m, hiddenLayerSize),
        w1=np.random.randn(hiddenLayerSize, outputSize),
    )
    losses = []

    def sigmoid(x, derive=False):
        # When derive=True, x is expected to already be a sigmoid activation,
        # so x * (1 - x) is its derivative with respect to the pre-activation.
        if derive:
            return x * (1 - x)
        return 1 / (1 + np.exp(-x))

    def MSE(y_pred, y_true):
        return np.mean((y_pred - y_true) ** 2)

    def run(layer0, model):
        # Forward pass through both layers
        layer1 = sigmoid(np.dot(layer0, model['w0']))
        layer2 = sigmoid(np.dot(layer1, model['w1']))
        return layer1, layer2

    def train_step(model):
        ## Forward
        layer1, layer2 = run(X, model)

        ## Backprop
        # l2_error is the derivative of binary cross-entropy with respect to
        # layer2; multiplied by the sigmoid derivative it reduces to
        # (layer2 - y). The MSE below is tracked only as a plotting metric.
        l2_error = -(y / layer2 - (1 - y) / (1 - layer2))
        l2_delta = l2_error * sigmoid(layer2, derive=True)
        l1_error = l2_delta.dot(model['w1'].T)
        l1_delta = l1_error * sigmoid(layer1, derive=True)

        ## Store the error for plotting
        loss = MSE(layer2, y)
        losses.append(loss)

        ## Update weights (gradient descent)
        model['w1'] -= learningRate * layer1.T.dot(l2_delta)
        model['w0'] -= learningRate * X.T.dot(l1_delta)
        return model

    for _ in range(epochs):
        model = train_step(model)

    if debug:
        plt.plot(losses)
        plt.show()

    # Return the trained weights so callers can reuse them
    return model
nn(X_train, y_train, debug=True, epochs=1000)
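
The call above assumes X_train and y_train are already defined; the gist never creates them. For a self-contained run, here is a minimal sketch assuming a toy XOR dataset with 2 input features and 1 output column (the data below is my assumption, not part of the original gist):

# Hypothetical toy data, not part of the original gist.
import numpy as np
X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y_train = np.array([[0], [1], [1], [0]], dtype=float)  # XOR targets
model = nn(X_train, y_train, debug=True, epochs=1000)

With this learning rate, plain gradient descent on XOR may need well over 1000 epochs to converge, depending on the random initialization.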