@dongguosheng
Last active March 1, 2016 12:55
Toy code for gradient descent and Newton's method.
# -*- coding: utf-8 -*-
import numpy as np

def foo(x):
    """Objective: f(x) = x^2 + 2x + 1 = (x + 1)^2, minimized at x = -1."""
    return x**2 + 2*x + 1

def g(x):
    """First derivative (gradient): f'(x) = 2x + 2."""
    return 2*x + 2

def h(x):
    """Second derivative (Hessian): f''(x) = 2."""
    return 2.0

def gradient_descent(x0, lr=0.1):
    x_last = x0 + 1    # guarantee the first convergence check fails
    print('start x0: %f' % x0)
    for cnt in range(100):
        x0 -= lr * g(x0)    # step along the negative gradient
        print('%d, %f' % (cnt, x0))
        if abs(x0 - x_last) < 0.001:    # stop once updates stall
            break
        x_last = x0
    return x0

def newton_method(x0):
    x_last = x0 + 1
    print('start x0: %f' % x0)
    for cnt in range(100):
        x0 -= g(x0) / h(x0)    # Newton step: gradient scaled by inverse curvature
        print('%d, %f' % (cnt, x0))
        if abs(x0 - x_last) < 0.001:
            break
        x_last = x0
    return x0

def main():
    x0 = np.random.rand()    # random start in [0, 1)
    gradient_descent(x0)
    newton_method(x0)

if __name__ == '__main__':
    main()
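Note: because foo(x) = (x + 1)^2 is quadratic, the Newton update is exact in one step: x - g(x)/h(x) = x - (2x + 2)/2 = -1, the true minimizer. Gradient descent with a fixed lr instead contracts the error geometrically, since x_new + 1 = (1 - 2*lr) * (x_old + 1), i.e. a factor of 0.8 per iteration at lr = 0.1.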
@dongguosheng (Author) commented:
Need to use line search to decide the step size (learning rate).
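A minimal sketch of one such line search: backtracking with the Armijo sufficient-decrease condition. This is not part of the gist; the function name backtracking_lr and the constants alpha0, rho, and c are illustrative choices.

def backtracking_lr(f, grad, x, alpha0=1.0, rho=0.5, c=1e-4):
    """Shrink the step until the Armijo condition holds:
    f(x - alpha * grad(x)) <= f(x) - c * alpha * grad(x)**2."""
    alpha = alpha0
    gx = grad(x)
    while f(x - alpha * gx) > f(x) - c * alpha * gx * gx:
        alpha *= rho    # step too large: shrink and retest
    return alpha

Inside gradient_descent, the fixed lr would then be replaced by lr = backtracking_lr(foo, g, x0) before each update.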
