Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
import autograd.numpy as np
from autograd import grad
def main():
    """Verify a hand-derived backprop against autograd for a 2-hidden-layer ReLU net.

    The network's loss is 0.5 * (W3 . relu(W2 . relu(W1 . x)) - y)^2 for a
    fixed input ``x`` and scalar target ``y``.  The gradients with respect to
    (W1, W2, W3) computed by the manual backward pass must match autograd's;
    an AssertionError is raised if any of them disagree.
    """

    def relu(x):
        """Elementwise max(x, 0)."""
        return np.maximum(x, np.zeros_like(x))

    d = 5    # input dimension
    n1 = 10  # first hidden-layer width
    n2 = 15  # second hidden-layer width
    xpt = np.ones((d,))  # fixed input point
    ypt = 1              # scalar regression target

    def net(args):
        """Squared-error loss of the two-hidden-layer ReLU network."""
        W1, W2, W3 = args
        return 0.5 * ((np.dot(W3, relu(np.dot(W2, relu(np.dot(W1, xpt))))) - ypt) ** 2)

    g = grad(net)

    # Seed so the check is reproducible from run to run.
    np.random.seed(0)
    W1 = np.random.randn(n1, d)
    W2 = np.random.randn(n2, n1)
    W3 = np.random.randn(n2)

    def compute_grad(args):
        """Manual backward pass; returns gradients w.r.t. (W1, W2, W3)."""
        W1, W2, W3 = args
        # Forward pass, keeping the pre-activations z1, z2 and output z3.
        z1 = np.dot(W1, xpt)
        z2 = np.dot(W2, relu(z1))
        z3 = np.dot(W3, relu(z2))
        # Backward pass.  Each p_k carries the NEGATED error signal at layer k
        # (p3 = -(z3 - y)), so every returned gradient is negated once more to
        # cancel the sign and yield the true gradient of the loss.
        p3 = -(z3 - ypt)
        p2 = p3 * W3
        p2[z2 <= 0] = 0  # ReLU subgradient: zero where the pre-activation <= 0
        p1 = np.dot(W2.T, p2)  # note: p2 is masked BEFORE propagating to layer 1
        p1[z1 <= 0] = 0
        return (-np.outer(p1, xpt), -np.outer(p2, relu(z1)), -p3 * relu(z2))

    ret1 = g((W1, W2, W3))
    ret2 = compute_grad((W1, W2, W3))
    # Compare the autograd and manual gradients for each weight matrix.
    for auto_g, manual_g in zip(ret1, ret2):
        assert np.allclose(auto_g, manual_g)


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment