@RahulDas-dev
Created February 14, 2023 19:45
matrix factorization using gradient descent
import numpy as np

# Squared Frobenius norm: ||X||_F^2 = Tr(X.X^T)
# d(||X||_F^2)/dX = 2X
# Goal: factor M ~ L.R, with L of shape (m, d) and R of shape (d, n)
# Residual:       E = L.R - M
# Loss function:  loss = ||E||_F^2, to be minimized
# Gradients (chain rule):
#   d(loss)/dL = d(||E||_F^2)/dE . dE/dL = 2*E.R^T
#   d(loss)/dR = dE/dR . d(||E||_F^2)/dE = 2*L^T.E
def mat_factor(input_mat, d, epoch=100, lr=1e-2):
    m, n = input_mat.shape
    L = np.random.normal(size=(m, d))   # random initialization of the left factor
    R = np.random.normal(size=(d, n))   # random initialization of the right factor
    for i in range(1, epoch + 1):       # gradient descent loop
        residual = L.dot(R) - input_mat
        dL = 2 * residual.dot(R.T)      # gradient of the loss w.r.t. L
        dR = 2 * L.T.dot(residual)      # gradient of the loss w.r.t. R
        L = L - lr * dL                 # update L
        R = R - lr * dR                 # update R
        if i % 10 == 0:
            print(f'epoch: {i}, residual: {np.linalg.norm(residual)}')  # logging
    return L, R
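
A minimal usage sketch, not part of the original gist: it builds a small synthetic rank-3 matrix and factors it with mat_factor. The matrix size, rank d, epoch count, and learning rate below are illustrative assumptions; with a small step size the residual printed every 10 epochs should decrease steadily.

if __name__ == '__main__':
    # Illustrative example (assumed values): factor a synthetic rank-3 matrix.
    rng = np.random.default_rng(0)
    true_L = rng.normal(size=(20, 3))
    true_R = rng.normal(size=(3, 15))
    M = true_L.dot(true_R)               # exactly rank-3 target matrix

    L, R = mat_factor(M, d=3, epoch=500, lr=1e-3)
    err = np.linalg.norm(L.dot(R) - M) / np.linalg.norm(M)
    print(f'relative reconstruction error: {err}')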