Skip to content

Instantly share code, notes, and snippets.

@ferryzhou
Last active September 3, 2017 03:53
Show Gist options
  • Save ferryzhou/2ff00abaf293e1587c9663800cc62f2f to your computer and use it in GitHub Desktop.
import numpy as np


def sgd_linear_regression(A, y, lr=0.1, l2_reg=0.001, epochs=1, verbose=False):
    """Estimate w such that A @ w ~= y via stochastic gradient descent.

    Minimizes the per-sample squared error 0.5 * (a . w - y_i)**2 plus an
    L2 (ridge) penalty, taking one gradient step per sample.

    Args:
        A: (N, M) design matrix, one sample per row.
        y: (N,) target vector.
        lr: learning rate for each SGD step.
        l2_reg: L2 regularization strength (0 disables regularization).
        epochs: number of full passes over the data (the original script
            made exactly one pass).
        verbose: if True, print the residual at every step (the original
            script's behavior).

    Returns:
        (M,) estimated weight vector.
    """
    _, n_features = A.shape
    w = np.zeros(n_features)
    for _ in range(epochs):
        for i in range(A.shape[0]):
            a = A[i, :]                 # current sample
            e = np.dot(a, w) - y[i]     # residual on this sample
            if verbose:
                print(e)
            g = e * a + l2_reg * w      # gradient of squared error + ridge term
            w = w - lr * g
    return w


if __name__ == "__main__":
    # Demo: Ax = y. Given A and y (with small noise), recover x.
    N = 1000  # number of samples
    M = 2     # number of features
    A = np.random.random((N, M))
    x = np.random.random(M)             # ground-truth weights
    noise = np.random.random(N) * 0.001
    y = np.dot(A, x) + noise
    w = sgd_linear_regression(A, y, lr=0.1, l2_reg=0.001, verbose=True)
    print(x)  # ground truth
    print(w)  # SGD estimate
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment