Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
def matrix_factorization_sgd(
        D, P, Q, steps=5000, alpha=0.0002, beta=0.02):
    """Factor a (possibly masked) matrix D into P.dot(Q) by per-entry SGD.

    Minimizes, for each observed entry D[i, j], the squared reconstruction
    error plus an L2 penalty on the touched row of P and column of Q:

        (P[i, :].dot(Q[:, j]) - D[i, j])**2
        + (beta / 2) * (||P[i, :]||**2 + ||Q[:, j]||**2)

    Entries that are masked out (when D is a numpy masked array) are skipped.

    Args:
        D: 2-D array of observations; may be a ``numpy.ma`` masked array,
           in which case masked entries are excluded from training.
        P: initial row-factor matrix, shape (n_rows, k).
        Q: initial column-factor matrix, shape (k, n_cols).
        steps: number of full passes over the observed entries.
        alpha: SGD learning rate.
        beta: L2 regularization strength.

    Returns:
        (P, Q) as plain ndarrays (the trained shared-variable values).
    """
    # Rebind the initial factors as Theano shared variables so the compiled
    # update function can mutate them in place across calls.
    P = theano.shared(P.astype(floatX))
    Q = theano.shared(Q.astype(floatX))
    P_i = T.vector()
    Q_j = T.vector()
    i = T.iscalar()
    j = T.iscalar()
    x = T.scalar()
    error = T.sqr(P_i.dot(Q_j) - x)
    regularization = (beta / 2.0) * (P_i.dot(P_i) + Q_j.dot(Q_j))
    cost = error + regularization
    # Gradients w.r.t. only the row/column touched by this sample.
    gp, gq = T.grad(cost=cost, wrt=[P_i, Q_j])
    # `givens` substitutes the current row of P / column of Q for the
    # symbolic vectors; `inc_subtensor` applies the sparse SGD step.
    train = theano.function(
        inputs=[i, j, x],
        givens=[(P_i, P[i, :]), (Q_j, Q[:, j])],
        updates=[(P, T.inc_subtensor(P[i, :], -gp * alpha)),
                 (Q, T.inc_subtensor(Q[:, j], -gq * alpha))])
    # Fix + hoist: `np.ma.getmask` returns the non-indexable `nomask`
    # singleton (False) when D has no masked entries, so indexing it would
    # raise TypeError; `getmaskarray` always yields a full boolean array.
    # Computing it once here also avoids rebuilding it per element.
    mask = np.ma.getmaskarray(D)
    for _ in range(steps):  # `range` instead of Python-2-only `xrange`
        for (row, col), val in np.ndenumerate(D):
            if not mask[row, col]:
                train(row, col, val)
    return P.get_value(), Q.get_value()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.