@dstein64
Last active September 2, 2016 15:01
import numpy as np
import numpy.ma as ma
from numpy.ma import getmask


def matrix_factorization_quux(
        D, P, Q, steps=5000, alpha=0.0002, beta=0.02):
    # quuxlabs-style factorization: stochastic gradient descent over the
    # observed (unmasked) entries of D, with L2 regularization weighted by beta.
    K = P.shape[1]
    P = np.copy(P)
    Q = np.copy(Q)
    for step in xrange(steps):
        for i in xrange(len(D)):
            for j in xrange(len(D[i])):
                if not getmask(D)[i, j]:
                    # Error on the observed entry (i, j).
                    eij = D[i, j] - np.dot(P[i, :], Q[:, j])
                    for k in xrange(K):
                        P[i, k] = P[i, k] + alpha * (2 * eij * Q[k, j] - beta * P[i, k])
                        Q[k, j] = Q[k, j] + alpha * (2 * eij * P[i, k] - beta * Q[k, j])
    return P, Q
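

# The Theano-based matrix_factorization_bgd and matrix_factorization_sgd
# called in the __main__ block below are not shown in this section.
# The function below is only a rough, hypothetical sketch of what a Theano
# batch gradient descent factorization could look like; its name and body are
# assumptions for illustration, not the gist's actual implementation.
def matrix_factorization_bgd_sketch(D, P, Q, steps=5000, alpha=0.0002, beta=0.02):
    # Local imports so this optional sketch does not force a Theano
    # dependency on the rest of the script.
    import theano
    import theano.tensor as T
    # Ratings and an observation mask as shared values; missing entries
    # contribute nothing to the error because the mask zeroes them out.
    R = theano.shared(np.asarray(D.filled(0), dtype=theano.config.floatX))
    M = theano.shared((~getmask(D)).astype(theano.config.floatX))
    P_sh = theano.shared(np.asarray(P, dtype=theano.config.floatX))
    Q_sh = theano.shared(np.asarray(Q, dtype=theano.config.floatX))
    # Squared error on observed entries plus L2 regularization; the beta/2
    # factor matches the gradient implied by the quuxlabs update rule above.
    E = (R - T.dot(P_sh, Q_sh)) * M
    cost = T.sum(E ** 2) + (beta / 2.0) * (T.sum(P_sh ** 2) + T.sum(Q_sh ** 2))
    gP, gQ = T.grad(cost, [P_sh, Q_sh])
    train = theano.function(
        inputs=[], outputs=cost,
        updates=[(P_sh, P_sh - alpha * gP), (Q_sh, Q_sh - alpha * gQ)])
    for step in xrange(steps):
        train()
    return P_sh.get_value(), Q_sh.get_value()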
if __name__ == '__main__':
    # Ratings matrix, with -1 marking missing entries to be masked out.
    D = np.array([[ 5,  3, -1,  1],
                  [ 4, -1, -1,  1],
                  [ 1,  1, -1,  5],
                  [ 1, -1, -1,  4],
                  [-1,  1,  5,  5]])
    D = ma.masked_array(D, mask=D==-1)
    m, n = D.shape
    K = 2
    # Random initial factors shared by all three implementations.
    P = np.random.rand(m, K)
    Q = np.random.rand(K, n)
    np.set_printoptions(formatter={'all': lambda x: str(x).rjust(2)})
    print 'Ratings Matrix\n', D, '\n'
    np.set_printoptions(precision=2, formatter=None)
    # The Theano implementations below are part of the gist but not shown
    # in this section.
    P_theano_bgd, Q_theano_bgd = matrix_factorization_bgd(D, P, Q)
    print 'Theano Batch Gradient Descent\n',\
        np.dot(P_theano_bgd, Q_theano_bgd), '\n'
    P_theano_sgd, Q_theano_sgd = matrix_factorization_sgd(D, P, Q)
    print 'Theano Stochastic Gradient Descent\n',\
        np.dot(P_theano_sgd, Q_theano_sgd), '\n'
    P_quux, Q_quux = matrix_factorization_quux(D, P, Q)
    print 'quuxlabs\n', np.dot(P_quux, Q_quux), '\n'