Skip to content

Instantly share code, notes, and snippets.

@alextp
Created January 28, 2011 01:07
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save alextp/799628 to your computer and use it in GitHub Desktop.
import numpy as np, scipy, scipy.sparse, numpy.linalg, scipy.optimize
from scipy import weave
def project_l1(lbda, sigma):
    """Project the nonnegative vector lbda so that its l1 norm is at most sigma.

    Standard sort-and-threshold simplex projection: sort descending, find the
    largest prefix whose per-prefix threshold keeps its entries positive, then
    subtract that threshold and clip at zero.
    """
    ll = -np.sort(-lbda)          # entries sorted in descending order
    cs = np.cumsum(ll)
    # candidate threshold for each prefix length 1..n
    vtheta = (cs - sigma) / (np.arange(ll.shape[0]) + 1.)
    # index of the last sorted entry still above its threshold
    # (argmin of a boolean array is the first False; all-True gives 0-1 = -1,
    # i.e. the full prefix, which is the correct fallback)
    i = np.argmin(ll - vtheta >= 0) - 1
    theta = vtheta[i]
    a = lbda - theta
    a *= a > 0                    # clip negatives to zero
    return a
def norm(dif):
    """Squared Euclidean norm of dif.

    Implemented with np.sum instead of np.dot so the result is always a
    scalar, never a 1-element array.
    """
    return np.sum(np.square(dif))
def getrow(M, i):
    """Return row i of sparse matrix M as a flat dense 1-d ndarray."""
    dense_row = M.getrow(i).todense()
    return np.asarray(dense_row).reshape(-1)
def setrow(M, i, v):
    """Write the nonzero entries of dense vector v into row i of sparse matrix M.

    Only positions where v is nonzero are assigned, mirroring the original
    weave-based implementation.  NOTE: the original C snippet passed the loop
    counter to __setitem__ as the column index instead of the actual nonzero
    column J(j), writing values into the wrong columns; this pure-Python
    version fixes that (scipy.weave is also Python-2-only and no longer
    exists).
    """
    for j in v.nonzero()[0]:
        M[i, j] = v[j]
def recerror(orig, t1, t2):
    """Total squared error of reconstructing orig as t1 * t2, summed row by row."""
    total = 0.
    for row in xrange(orig.shape[0]):
        target = getrow(orig, row)
        coeffs = getrow(t1, row)
        total += norm(target - t2 * coeffs)
    return total
class MatModel(object):
def __init__(self, M, alpha, Ntopics, lrate, nsteps):
"""M is the sparse matrix we want to decompose,
alpha is the maximum l1 norm for each line of the factorized matrices
Ntopics is the number of latent components to use
lrate is the learning rate to use in the gradient descent
nsteps is how many gradient-and-project steps are used at each iteration"""
self.na, self.nb = M.shape
self.Ntopics = Ntopics
self.alpha = alpha
self.lrate = lrate
self.nsteps = nsteps
self.M = M
self.Tl = scipy.sparse.csr_matrix(np.random.random((self.na,self.Ntopics)))
self.Tr = scipy.sparse.csr_matrix(np.random.random((self.nb,self.Ntopics)))
def iterate(self):
print "Maximizing tl"
self.optimize_l(self.M, self.Tl, self.Tr)
print "error =", self.error()
print "Maximizing tr"
self.optimize_l(self.M.T, self.Tr, self.Tl)
print "error =", self.error()
def error(self):
return recerror(self.M, self.Tl, self.Tr)
def optimize_l(self, M, l1, l2):
"""Code to optimize either Tl or Tr.
The optimization is a gradient-descent step followed by an l1 projection step."""
nt = np.dot(l2.T,l2).toarray()
for i in xrange(M.shape[0]):
ai = getrow(M, i)
tai = getrow(l1, i)
for j in xrange(self.nsteps):
grad = (np.dot(nt,tai)-(l2.T*ai).reshape(-1))
tai -= self.lrate*grad/np.sqrt(norm(grad))
tai = project_l1(tai, self.alpha)
setrow(l1, i, tai)
@ogrisel
Copy link

ogrisel commented Jan 28, 2011

I think I found a bug on line 90: you are redefining the variable i which makes the following setrow behave wrongly

@alextp
Copy link
Author

alextp commented Jan 28, 2011

@ogrisel, I don't see the bug. Line 90 just computes the gradient, no?

@ogrisel
Copy link

ogrisel commented Jan 28, 2011

you have 2 nested for loops with the same iteration variable "i": the first iterates over the row indices (hence the setrow call is valid with i as an argument), but the inner for loop reassigns "i" over xrange(self.nsteps) and might hence break the setrow call with IndexOutOfRange errors (or something similar) if the number of steps is larger than M.shape[0]; otherwise the wrong row gets updated, I guess.

@alextp
Copy link
Author

alextp commented Jan 28, 2011

@ogrisel: yes, you're perfectly correct, I fixed it, thanks :-)

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment