Gist by @edenau · Last active December 27, 2019
import numpy as np

def gradient_descent(X, y, w, b, learning_rate):
    # Gradients of the squared loss with respect to w and b
    dw = -2 * np.sum(X * (y - w * X - b))  # ∂e/∂w
    db = -2 * np.sum(y - w * X - b)        # ∂e/∂b
    w_new = w - learning_rate * dw         # minus sign since we are minimizing e
    b_new = b - learning_rate * db
    return w_new, b_new
def get_loss(X, y, w, b):
    # squared loss; .T and @ denote transpose and matrix multiplication, respectively
    return (y - w * X - b).T @ (y - w * X - b)
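For reference, the update rules in gradient_descent follow from differentiating the squared loss computed by get_loss with respect to w and b:

e(w, b) = \sum_i (y_i - w x_i - b)^2,
\qquad
\frac{\partial e}{\partial w} = -2 \sum_i x_i (y_i - w x_i - b),
\qquad
\frac{\partial e}{\partial b} = -2 \sum_i (y_i - w x_i - b).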
learning_rate = 0.000001
max_epoch = 500
w, b = -1, 0

for epoch in range(1, max_epoch + 1):
    w, b = gradient_descent(X, y, w, b, learning_rate)
    if epoch % 50 == 0:
        print(f'{get_loss(X, y, w, b):.0f}')

if b > 0:
    print(f'y = {w:.2f} x + {b:.2f}')
else:
    print(f'y = {w:.2f} x - {-b:.2f}')
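The gist never defines X and y, so the training loop will not run as-is. A minimal sketch of how one might supply data is below; the synthetic arrays, noise level, and true parameters (slope 3, intercept -5) are illustrative assumptions, not part of the original gist. Place this before the loop:

import numpy as np

# Hypothetical synthetic data (an assumption; the gist does not define X or y):
# points roughly on the line y = 3x - 5 with Gaussian noise
rng = np.random.default_rng(0)
X = rng.uniform(0, 100, size=200)
y = 3 * X - 5 + rng.normal(0, 2, size=200)

With 1-D arrays like these, (y - w * X - b).T @ (y - w * X - b) in get_loss reduces to a plain inner product, so the script works without reshaping; the tiny learning rate compensates for the gradients being sums rather than means over the dataset.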