Skip to content

Instantly share code, notes, and snippets.

@Yash-567
Created July 21, 2020 18:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Yash-567/3da2cc10ffc261f565d5cfa0b040f544 to your computer and use it in GitHub Desktop.
Save Yash-567/3da2cc10ffc261f565d5cfa0b040f544 to your computer and use it in GitHub Desktop.
from sklearn.utils import shuffle
from torch import nn
import torch
import torch.nn.functional as F
# NOTE(review): `df` is assumed to be a ratings DataFrame loaded earlier
# (columns: userId, movie_idx, rating) — not visible in this chunk; confirm.
N = df.userId.max() + 1 # number of users (ids assumed 0-based and contiguous)
M = df.movie_idx.max() + 1 # number of movies (ids assumed 0-based and contiguous)
# split into train and test (fixed seed so the 80/20 split is reproducible)
df = shuffle(df, random_state = 12)
cutoff = int(0.80*len(df))
df_train = df.iloc[:cutoff]
df_test = df.iloc[cutoff:]
# initialize variables
K = 10 # latent dimensionality of the user/movie embeddings
mu = df_train.rating.mean()  # global mean rating; targets are centered on this
epochs = 10
class Network(nn.Module):
    """Embedding-based rating predictor.

    Looks up a user embedding and a movie embedding, concatenates them,
    and passes the result through one hidden layer to predict a single
    (mean-centered) rating per (user, movie) pair.

    Args:
        num_users: vocabulary size of the user embedding table.
            Defaults to the module-level ``N`` for backward compatibility.
        num_movies: vocabulary size of the movie embedding table.
            Defaults to the module-level ``M``.
        latent_dim: embedding dimensionality. Defaults to the
            module-level ``K``.
    """

    def __init__(self, num_users=None, num_movies=None, latent_dim=None):
        super().__init__()
        # Fall back to the script-level globals so Network() keeps working.
        num_users = N if num_users is None else num_users
        num_movies = M if num_movies is None else num_movies
        latent_dim = K if latent_dim is None else latent_dim
        self.u_embeddings = nn.Embedding(num_users, latent_dim)
        self.m_embeddings = nn.Embedding(num_movies, latent_dim)
        # Flatten (batch, 1, latent_dim) lookups to (batch, latent_dim);
        # replaces the original hard-coded .view(-1, 10) which silently
        # broke whenever K != 10.
        self.flatten = nn.Flatten()
        self.hidden = nn.Linear(2 * latent_dim, 400)
        self.output = nn.Linear(400, 1)

    def forward(self, u, m):
        """Predict one rating per pair.

        Args:
            u: 1-D (or squeezable) LongTensor of user indices.
            m: 1-D (or squeezable) LongTensor of movie indices.

        Returns:
            Tensor of shape (batch, 1) with predicted centered ratings.
        """
        u = u.view(-1, 1)
        m = m.view(-1, 1)
        u_embedding = self.flatten(self.u_embeddings(u))
        m_embedding = self.flatten(self.m_embeddings(m))
        x = torch.cat([u_embedding, m_embedding], dim=1)
        x = F.relu(self.hidden(x))
        x = self.output(x)
        return x
model = Network()
# Run the model in float64 — the pandas rating values below arrive as
# float64 tensors, so weights must match that dtype.
model.double()
from torch import optim
# Plain MSE on mean-centered ratings; SGD with momentum.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr = 0.08, momentum=0.9)
######GPU training
import random
from tqdm import tqdm

# NOTE(review): `device` is used below but never defined in this chunk —
# presumably `device = torch.device("cuda" ...)` exists earlier; confirm.
epochs = 25
BATCH_SIZE = 128
train_losses, test_losses = [], []
for e in range(epochs):
    # ---- train one epoch over mini-batches of the shuffled frame ----
    running_loss = 0
    for i in tqdm(range(0, len(df_train), BATCH_SIZE)):
        train = df_train[i:i + BATCH_SIZE]
        optimizer.zero_grad()
        logits = model(torch.tensor(train.userId.values).to(device),
                       torch.tensor(train.movie_idx.values).to(device))
        # Targets are centered on the global mean rating `mu`.
        labels = torch.tensor(train.rating.values - mu).view(-1, 1).to(device)
        loss = criterion(logits, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()

    # ---- checkpoint, then evaluate on the held-out split ----
    test_loss = 0
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                }, 'Checkpoint{epoch:01d}.pth'.format(epoch=e))
    # Turn off gradients for validation, saves memory and computations
    with torch.no_grad():
        model.eval()
        for i in tqdm(range(0, len(df_test), BATCH_SIZE)):
            test = df_test[i:i + BATCH_SIZE]
            logits = model(torch.tensor(test.userId.values).to(device),
                           torch.tensor(test.movie_idx.values).to(device))
            labels = torch.tensor(test.rating.values - mu).view(-1, 1).to(device)
            loss = criterion(logits, labels)
            test_loss += loss.item()
    model.train()

    # Per-sample loss estimate; slightly over-weights the final partial
    # batch (each batch mean is rescaled by BATCH_SIZE regardless of size).
    train_losses.append((running_loss * BATCH_SIZE) / len(df_train))
    test_losses.append((test_loss * BATCH_SIZE) / len(df_test))
    print("\nEpoch: {}/{}.. ".format(e + 1, epochs),
          "Training Loss: {:.5f}.. ".format(train_losses[-1]),
          "Test Loss: {:.5f}.. ".format(test_losses[-1]))
    # "Test Accuracy: {:.5f}".format(accuracy/len(testloader)))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment