Skip to content

Instantly share code, notes, and snippets.

@namachan10777
Created January 1, 2021 19:36
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save namachan10777/df7aae966a1f005d7422387d92f941d7 to your computer and use it in GitHub Desktop.
Save namachan10777/df7aae966a1f005d7422387d92f941d7 to your computer and use it in GitHub Desktop.
#!/usr/bin/python3
import numpy as np
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)), applied element-wise to numpy inputs."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def inv_sigmoid(x):
    """Derivative of the sigmoid: sigma'(x) = (1 - sigma(x)) * sigma(x).

    NOTE(review): despite the name this is the sigmoid *derivative*, not its
    inverse (the logit); the name is kept because callers depend on it.
    """
    s = sigmoid(x)
    return (1 - s) * s
def loss_d(y, estimated):
    """Return the error term y - estimated (negated gradient of 1/2*(y-est)^2)."""
    error = y - estimated
    return error
def run(ws, x):
    """Forward pass through the network.

    Parameters:
        ws: list of weight matrices, applied in order (fan_out x fan_in each).
        x:  input column vector.

    Returns:
        (hs, ms): ms[k+1] is the pre-activation of layer k (ms[0] is the raw
        input); hs[k+1] = sigmoid(ms[k+1]) is layer k's output (hs[0] is the
        raw input).

    Bug fix: hs previously started as [sigmoid(x)] even though the value
    actually fed into the first layer is x itself (the loop seeds h = x), so
    hs[0] disagreed with ms[0]. hs now starts with x; the training loop only
    reads indices >= 1, so it is unaffected.
    """
    hs = [x]  # was [sigmoid(x)] — inconsistent with the h = x seed below
    ms = [x]
    h = x
    for w in ws:
        m = w @ h
        h = sigmoid(m)
        ms.append(m)
        hs.append(h)
    return hs, ms
# Training samples: (input column vector, target column vector).
# NOTE(review): the (0,0) -> 0 sample appears twice; perhaps a (1,1) sample
# was intended (e.g. XOR) — confirm with the author.
batch = [
    (np.matrix([0.0, 0.0]).T, np.matrix([0.0]).T),
    (np.matrix([1.0, 0.0]).T, np.matrix([1.0]).T),
    (np.matrix([0.0, 1.0]).T, np.matrix([1.0]).T),
    (np.matrix([0.0, 0.0]).T, np.matrix([0.0]).T),
]

# Hidden-layer widths; the output width is appended from the target shape.
layers = [3, 3]
ws = []
input_size, _ = batch[0][0].shape
output_size, _ = batch[0][1].shape
layers.append(output_size)

# One random weight matrix per layer, shaped (fan_out, fan_in).
for width in layers:
    ws.append(np.random.rand(width, input_size))
    input_size = width

a = 0.1  # learning rate
# Train by per-sample (stochastic) gradient descent on the squared error.
for _ in range(100):  # epochs
    for (x, y) in batch:
        ws_ = ws.copy()
        hs, ms = run(ws, x)
        # Error propagated backwards; start from the output error y - y_hat.
        # Index with [j, 0] to get a scalar (loss starts as a column matrix
        # and later becomes a column ndarray).
        loss = loss_d(y, hs[-1])[:, 0]
        for n in reversed(range(len(ws))):
            J, I = ws[n].shape
            w_ = ws[n].copy()
            loss_ = np.zeros([I, 1])
            for j in range(J):
                # delta_j = error_j * sigma'(pre-activation of unit j)
                delta = loss[j, 0] * inv_sigmoid(ms[n + 1][j, 0])
                for i in range(I):
                    # Activation feeding weight w[j, i]: the raw input (ms[0])
                    # for the first layer, the previous layer's sigmoid output
                    # otherwise.
                    inp = hs[n][i, 0] if n > 0 else ms[0][i, 0]
                    # Fix 1: the gradient multiplies the *input* activation
                    # inp, not the output activation hs[n+1][j] as before.
                    # Fix 2: with loss = y - y_hat the squared-error gradient
                    # is -delta*inp, so descending means w += a*delta*inp
                    # (the old `-=` performed gradient ascent).
                    w_[j, i] += a * delta * inp
                    # Fix 3: the error sent to the previous layer is delta
                    # weighted by the (old) weight, not the raw gradient.
                    loss_[i, 0] += delta * ws[n][j, i]
            ws_[n] = w_
            loss = loss_
        ws = ws_
print(ws)
# Print the trained network's prediction next to each target.
for (x, y) in batch:
    activations, _ = run(ws, x)
    print(activations[-1], y)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment