
@dstein64
Last active September 2, 2016 15:16
import numpy as np
from numpy.ma import getmask
import theano
import theano.tensor as T

floatX = theano.config.floatX


def matrix_factorization_sgd(
        D, P, Q, steps=5000, alpha=0.0002, beta=0.02):
    # D is a numpy masked array of observed entries; P (n x k) and Q (k x m)
    # are the initial factor matrices, stored as shared variables so the
    # compiled training function can update them in place.
    P = theano.shared(P.astype(floatX))
    Q = theano.shared(Q.astype(floatX))
    # Symbolic placeholders for one row of P, one column of Q, and one entry of D.
    P_i = T.vector()
    Q_j = T.vector()
    i = T.iscalar()
    j = T.iscalar()
    x = T.scalar()
    # Squared reconstruction error plus L2 regularization on the two factors.
    error = T.sqr(P_i.dot(Q_j) - x)
    regularization = (beta / 2.0) * (P_i.dot(P_i) + Q_j.dot(Q_j))
    cost = error + regularization
    gp, gq = T.grad(cost=cost, wrt=[P_i, Q_j])
    # One SGD step: substitute the current row/column via givens and move each
    # factor against its gradient.
    train = theano.function(
        inputs=[i, j, x],
        givens=[(P_i, P[i, :]), (Q_j, Q[:, j])],
        updates=[(P, T.inc_subtensor(P[i, :], -gp * alpha)),
                 (Q, T.inc_subtensor(Q[:, j], -gq * alpha))])
    # Sweep over the observed (unmasked) entries of D on each pass.
    for _ in xrange(steps):
        for (row, col), val in np.ndenumerate(D):
            if not getmask(D)[row, col]:
                train(row, col, val)
    return P.get_value(), Q.get_value()
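
A minimal usage sketch (not part of the original gist): the function expects D to be a numpy masked array whose masked cells mark unknown entries, and P, Q to be initial factors of shapes (n, k) and (k, m). The toy ratings matrix and the choice of k below are illustrative assumptions.

    import numpy as np

    # Hypothetical ratings matrix; zeros denote missing entries and are masked out.
    R = np.array([[5, 3, 0, 1],
                  [4, 0, 0, 1],
                  [1, 1, 0, 5],
                  [1, 0, 0, 4],
                  [0, 1, 5, 4]], dtype=float)
    D = np.ma.masked_equal(R, 0)

    n, m = D.shape
    k = 2  # number of latent factors (illustrative choice)
    P0 = np.random.rand(n, k)
    Q0 = np.random.rand(k, m)

    P, Q = matrix_factorization_sgd(D, P0, Q0, steps=5000)
    print(np.dot(P, Q))  # reconstruction; masked cells are now filled-in predictions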