Matrix factorization problem implemented in TensorFlow and MATLAB (with YALMIP)
yalmip('clear')
T = [0 1 0 1; 1 0 1 0; 0 1 0 1; 1 0 1 0];
x = sdpvar(4, 1);
y = sdpvar(4, 1);
assign(x, randn(4, 1));
assign(y, randn(4, 1));
const = [x'*x <= 1; y'*y <= 1];
% const = [];
mat = T - x*y';
obj = mat(:)' * mat(:);
options = sdpsettings('solver','fmincon','verbose', 0, 'usex0', 1, 'savesolveroutput', 1);
sol = optimize(const, obj, options);
x_sol = value(x);
y_sol = value(y);
fprintf('%2.4f %2.4f %2.4f\n', value(obj), norm(x_sol), norm(y_sol))
import tensorflow as tf
import numpy as np

T = tf.convert_to_tensor(np.array([[1, 0, 1, 0], [0, 1, 0, 1],
                                   [1, 0, 1, 0], [0, 1, 0, 1]]), dtype='float32')

with tf.Session() as sess:
  # The `constraint` callable projects each variable back into the unit ball
  # after every gradient step (projected gradient descent).
  x = tf.Variable(initial_value=tf.truncated_normal((4, 1)), name='x',
                  constraint=lambda t: tf.clip_by_norm(t, 1))
  y = tf.Variable(initial_value=tf.truncated_normal((4, 1)), name='y',
                  constraint=lambda t: tf.clip_by_norm(t, 1))

  loss = tf.norm(T - tf.matmul(x, tf.transpose(y)))**2
  opti = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

  init = tf.global_variables_initializer()
  sess.run([init])

  LOSS = 1000
  patience = 5
  for i in range(100):
    sess.run(opti)
    l, x_sol, y_sol = sess.run([loss, x, y])
    if l < LOSS:
      LOSS = l
      X_OPT = x_sol
      Y_OPT = y_sol
    else:
      patience -= 1
    # early stopping once the loss has stopped improving
    if i > 10 and patience <= 0:
      break
    print("%2.4f %2.4f %2.4f" % (l, np.linalg.norm(x_sol), np.linalg.norm(y_sol)))
  print("%2.4f %2.4f %2.4f" % (LOSS, np.linalg.norm(X_OPT), np.linalg.norm(Y_OPT)))

Matrix Factorization

Unconstrained

We want to find two matrices $\mathbf{X}, \mathbf{Y}$ such that $$ \|\mathbf{T} - \mathbf{X} \mathbf{Y}^\top\|_F^2 $$ is minimized.

Here is the MATLAB implementation (using YALMIP):

yalmip('clear')

T = [0 1 0 1; 1 0 1 0; 0 1 0 1; 1 0 1 0];


x = sdpvar(4, 1);
y = sdpvar(4, 1);
assign(x, randn(4, 1));
assign(y, randn(4, 1));
% without constraint
const = [];
mat = T - x*y';
obj = mat(:)' * mat(:);

options = sdpsettings('verbose', 0, 'usex0', 1, 'savesolveroutput', 1);

sol = optimize(const, obj, options);

x_sol = value(x);
y_sol = value(y);
fprintf('%2.4f  %2.4f %2.4f\n', value(obj), norm(x_sol), norm(y_sol))

Here is the Python implementation (TensorFlow):

import tensorflow as tf
import numpy as np

T = tf.convert_to_tensor(np.array([[1, 0, 1, 0], [0, 1, 0, 1], 
                                   [1, 0, 1, 0], [0, 1, 0, 1]]), dtype='float32')
with tf.Session() as sess:
  x = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='x')
  y = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='y')

  loss = tf.norm(T - tf.matmul(x, tf.transpose(y)))**2
  opti = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
  
  init = tf.global_variables_initializer()
  sess.run([init])
  LOSS = 1000
  patience = 5

  for i in range(100):
    sess.run(opti)
    l, x_sol, y_sol = sess.run([loss, x, y])
    if l < LOSS:
      LOSS = l
      X_OPT = x_sol
      Y_OPT = y_sol
    else:
      patience -= 1
    # early stopping once the loss has stopped improving
    if i > 10 and patience <= 0:
      break
    # print("%2.4f   %2.4f   %2.4f" % (l, np.linalg.norm(x_sol), np.linalg.norm(y_sol)))
  print(x_sol)
  print(l)
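
For reference, the unconstrained problem has a closed-form solution: by the Eckart-Young theorem, the best rank-$k$ approximation of $\mathbf{T}$ in Frobenius norm comes from the truncated SVD. A minimal NumPy sketch (choosing k = 3 to mirror the (4, 3) factors above is an assumption, not part of the gist):

import numpy as np

T = np.array([[1, 0, 1, 0], [0, 1, 0, 1],
              [1, 0, 1, 0], [0, 1, 0, 1]], dtype=float)

# Truncated SVD gives the optimal rank-k factors X = U_k diag(s_k), Y = V_k.
U, s, Vt = np.linalg.svd(T)
k = 3
X = U[:, :k] * s[:k]   # scale the columns of U_k by the singular values
Y = Vt[:k, :].T
print(np.linalg.norm(T - X @ Y.T, 'fro')**2)  # ~0, since rank(T) = 2 <= k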

Constrained

We want to find two matrices $\mathbf{X}, \mathbf{Y}$ such that $$ \|\mathbf{T} - \mathbf{X} \mathbf{Y}^\top\|_F^2 $$ is minimized, subject to norm constraints on $\mathbf{X}$ and $\mathbf{Y}$: $\|\mathbf{X}\| \le 1$, $\|\mathbf{Y}\| \le 1$.

Constraint on the whole matrix

const = [x'*x <= 1; y'*y <= 1];
x = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='x',
                  constraint=lambda x: tf.clip_by_norm(x, 1))
y = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='y',
                  constraint=lambda x: tf.clip_by_norm(x, 1))
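
In TensorFlow, the `constraint` argument is a callable applied to the variable after each optimizer update, so the training loop above is projected gradient descent: take a gradient step, then project back onto the feasible set. A standalone NumPy sketch of one such step for the rank-1 case (the function names here are illustrative, not from the gist):

import numpy as np

def project_unit_ball(v):
    # what tf.clip_by_norm(v, 1) does: rescale v if its L2 norm exceeds 1
    n = np.linalg.norm(v)
    return v if n <= 1 else v / n

def pgd_step(T, x, y, lr=0.1):
    # one projected-gradient step on f(x, y) = ||T - x y^T||_F^2
    r = T - np.outer(x, y)              # residual
    gx, gy = -2 * r @ y, -2 * r.T @ x   # gradients w.r.t. x and y
    return project_unit_ball(x - lr * gx), project_unit_ball(y - lr * gy)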

Constraint on columns / rows

Apply the constraint to each row:

const = [];
for idx = 1:size(x, 1)
  % squared norm of each row, bounded by 1
  const = [const, x(idx,:)*x(idx,:)' <= 1, y(idx,:)*y(idx,:)' <= 1];
end
# axes=[1] computes one norm per row, so each row is clipped independently
x = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='x',
                constraint=lambda t: tf.clip_by_norm(t, 1, axes=[1]))
y = tf.Variable(initial_value=tf.truncated_normal((4, 3)), name='y',
                constraint=lambda t: tf.clip_by_norm(t, 1, axes=[1]))
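
A quick NumPy check of what the row-wise projection does (a hand-rolled equivalent, for illustration only):

import numpy as np

def clip_rows(M, max_norm=1.0):
    # scale down any row whose L2 norm exceeds max_norm; leave the rest as-is
    norms = np.linalg.norm(M, axis=1, keepdims=True)
    scale = np.minimum(1.0, max_norm / np.maximum(norms, 1e-12))
    return M * scale

M = np.random.randn(4, 3)
print(np.linalg.norm(clip_rows(M), axis=1))  # every row norm is now <= 1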

Here is an implementation in scipy.optimize with the row constraint:

import numpy as np
from scipy.optimize import minimize

np.random.seed(8)
A = np.random.rand(3,3)
A = (np.transpose(A) + A)/2
W = np.random.rand(4,4)

def obj(var):
    var = var.reshape(3, 4)
    return -np.linalg.norm( np.dot(np.dot(A, var),W), 'fro')

def main():

    print("Row constrained problem")
    # one inequality per row of the reshaped 3x4 variable: 0.1 - ||row_i|| >= 0
    cons = {'type': 'ineq',
            'fun': lambda x: np.array([0.1 - np.linalg.norm(x[i*4:i*4+4])
                                       for i in range(3)])}
    # SLSQP handles inequality constraints; COBYLA is a derivative-free alternative
    res = minimize(obj, np.random.randn(12),
                   constraints=cons, method='SLSQP', options={'disp': True})

    print(res.x.reshape(3, 4))
    print(res.fun)
    # verify the norm of every row against the 0.1 bound
    print(np.linalg.norm(res.x.reshape(3, 4), axis=1))

if __name__ == '__main__':
    main()
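
On newer SciPy (1.1+), the same row constraint can also be expressed with `NonlinearConstraint` and the trust-constr method; a sketch under that assumption:

import numpy as np
from scipy.optimize import minimize, NonlinearConstraint

np.random.seed(8)
A = np.random.rand(3, 3)
A = (A + A.T) / 2
W = np.random.rand(4, 4)

def obj(var):
    return -np.linalg.norm(A @ var.reshape(3, 4) @ W, 'fro')

# row norms of the reshaped 3x4 variable, each bounded above by 0.1
row_norms = lambda x: np.linalg.norm(x.reshape(3, 4), axis=1)
nlc = NonlinearConstraint(row_norms, 0, 0.1)

res = minimize(obj, np.random.randn(12), constraints=[nlc], method='trust-constr')
print(res.fun, row_norms(res.x))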