@zzag
Created August 2, 2016 23:32
Gradient checking
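A small NumPy utility for gradient checking: it estimates the gradient of a cost function numerically with central differences and compares the result against the analytical gradient.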
import numpy as np


def eval_gradient_naive(f, x, dx=1e-4):
    """Estimate the gradient of f at x with central differences.

    f must return a tuple (cost, analytical_gradient); only the cost is
    used here. Each entry of x is perturbed in place and then restored.
    """
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        # save old value
        oldval = x[ix]
        # central difference: (f(x + dx) - f(x - dx)) / (2 * dx)
        x[ix] = oldval - dx
        fx1, _ = f(x)
        x[ix] = oldval + dx
        fx2, _ = f(x)
        numgrad = (fx2 - fx1) / (2 * dx)
        # restore old value
        x[ix] = oldval
        grad[ix] = numgrad
        it.iternext()
    return grad

# Quick check on a toy cost function: f(x) = sum(x^2 + 3x),
# whose analytical gradient is 2x + 3.
f = lambda x: (np.sum(x**2 + 3*x), 2*x + 3)

x = np.random.randn(3, 3)
num_grad = eval_gradient_naive(f, x)
_, grad = f(x)

print('Numerical gradient:')
print(num_grad)
print('Analytical gradient:')
print(grad)
print('Difference: %f' % np.linalg.norm(num_grad - grad, ord='fro'))
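
The Frobenius norm of the difference depends on the scale of the gradients themselves. A common refinement, sketched below rather than taken from the gist (the rel_error helper and the 1e-7 threshold are assumptions drawn from standard gradient-checking practice), is to report the maximum elementwise relative error, which can be compared against a fixed threshold regardless of scale:

# Sketch: scale-independent comparison (rel_error is a hypothetical
# helper, not part of the original gist).
def rel_error(a, b, eps=1e-8):
    # max over elements of |a - b| / max(|a| + |b|, eps); the eps guard
    # avoids division by zero where both gradients vanish
    return np.max(np.abs(a - b) / np.maximum(eps, np.abs(a) + np.abs(b)))

# Values around 1e-7 or smaller typically indicate a correct gradient.
print('Relative error: %e' % rel_error(num_grad, grad))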