# Gist by @BalazsHoranyi, last active May 31, 2018
import torch
import torch.nn as nn
import torch.nn.functional as F

# Run the dense layers on the GPU when one is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def gpu(tensor, gpu=False):
    if gpu:
        return tensor.cuda()
    else:
        return tensor


class ScaledEmbedding(nn.Embedding):
    """
    Embedding layer that initialises its values by drawing from a
    normal distribution scaled by the inverse of the embedding dimension.

    Resources
    ---------
    https://github.com/maciejkula/spotlight/blob/master/spotlight/layers.py
    """

    def reset_parameters(self):
        """
        Initialize parameters.
        """
        self.weight.data.normal_(0, 1.0 / self.embedding_dim)
        if self.padding_idx is not None:
            self.weight.data[self.padding_idx].fill_(0)


class ZeroEmbedding(nn.Embedding):
    """
    Embedding layer that initialises its values to zero.
    Used for biases.

    Resources
    ---------
    https://github.com/maciejkula/spotlight/blob/master/spotlight/layers.py
    """

    def reset_parameters(self):
        """
        Initialize parameters.
        """
        self.weight.data.zero_()
        if self.padding_idx is not None:
            self.weight.data[self.padding_idx].fill_(0)
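
# Illustrative check (not part of the original gist): the two layers above
# differ only in how reset_parameters() initialises the weights, e.g.
#
#     emb = ScaledEmbedding(10, 4)   # weights ~ N(0, 1/4): emb.weight.std() is roughly 0.25
#     bias = ZeroEmbedding(10, 1)    # bias.weight.abs().sum() == tensor(0.)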


class NeuralMatrixFactorization(nn.Module):
    """
    Neural Matrix Factorization representation.

    Parameters
    ----------
    num_users: int
        Number of users in the model.
    num_items: int
        Number of items in the model.
    embedding_dim: int, optional
        Dimensionality of the latent representations.
    sparse: boolean, optional
        Use sparse gradients.
    use_cuda_embeddings: boolean, optional
        Place the embedding tables on the GPU up front
        (can be faster, but be careful with memory!).
    activation_type: str, optional
        See creek.deep.representations.activations for options.

    Resources
    ---------
    He, Xiangnan, et al. "Neural collaborative filtering." Proceedings of the
    26th International Conference on World Wide Web. International World Wide
    Web Conferences Steering Committee, 2017.
    """

    def __init__(self, num_users, num_items, embedding_dim=32, sparse=False,
                 activation_type='relu', use_cuda_embeddings=False):
        super(NeuralMatrixFactorization, self).__init__()

        self.embedding_dim = embedding_dim
        self.use_cuda_embeddings = use_cuda_embeddings
        self.activation_type = activation_type

        # Separate embedding tables for the MLP and MF towers.
        self.user_embeddings_mlp = gpu(ScaledEmbedding(num_users, embedding_dim, sparse=sparse),
                                       self.use_cuda_embeddings)
        self.user_embeddings_mf = gpu(ScaledEmbedding(num_users, embedding_dim, sparse=sparse),
                                      self.use_cuda_embeddings)
        self.item_embeddings_mlp = gpu(ScaledEmbedding(num_items, embedding_dim, sparse=sparse),
                                       self.use_cuda_embeddings)
        self.item_embeddings_mf = gpu(ScaledEmbedding(num_items, embedding_dim, sparse=sparse),
                                      self.use_cuda_embeddings)
        self.item_biases_mf = gpu(ZeroEmbedding(num_items, 1, sparse=sparse),
                                  self.use_cuda_embeddings)
        self.item_biases_mlp = gpu(ZeroEmbedding(num_items, 1, sparse=sparse),
                                   self.use_cuda_embeddings)

        self.input_size = embedding_dim * 2
        self.output_size = 1

        # MLP tower: halve the width at each layer
        # (e.g. 64 -> 32 -> 16 -> 8 for embedding_dim=32).
        self.fc_layers = torch.nn.ModuleList()
        layers = [self.embedding_dim * 2, self.embedding_dim,
                  self.embedding_dim // 2, self.embedding_dim // 4]
        for in_size, out_size in zip(layers[:-1], layers[1:]):
            self.fc_layers.append(torch.nn.Linear(in_size, out_size))
        self.fc_layers = self.fc_layers.to(device)

        # The final layer sees the MF vector (embedding_dim) concatenated
        # with the last MLP layer (embedding_dim // 4).
        self.output = torch.nn.Linear(self.embedding_dim // 4 + self.embedding_dim,
                                      out_features=1).to(device)

    def forward(self, user_ids, item_ids):
        """
        Compute the forward pass of the representation.
        """
        # MLP tower inputs. NOTE: .squeeze() assumes batches of more than
        # one example; dropout is gated on self.training so that it is
        # disabled in eval mode.
        user_embedding_mlp = self.user_embeddings_mlp(user_ids).to(device).squeeze()
        item_embedding_mlp = self.item_embeddings_mlp(item_ids).to(device).squeeze()
        user_embedding_mlp = F.dropout(user_embedding_mlp, 0.5, training=self.training)
        item_embedding_mlp = F.dropout(item_embedding_mlp, 0.5, training=self.training)
        item_bias_mlp = self.item_biases_mlp(item_ids).to(device).squeeze()

        # MF tower inputs.
        user_embedding_mf = self.user_embeddings_mf(user_ids).to(device).squeeze()
        item_embedding_mf = self.item_embeddings_mf(item_ids).to(device).squeeze()
        user_embedding_mf = F.dropout(user_embedding_mf, 0.5, training=self.training)
        item_embedding_mf = F.dropout(item_embedding_mf, 0.5, training=self.training)
        item_bias_mf = self.item_biases_mf(item_ids).to(device).squeeze()

        # Vanilla matrix factorization: element-wise product plus item bias.
        vector_mf = torch.mul(user_embedding_mf, item_embedding_mf)
        vector_mf = vector_mf + item_bias_mf.unsqueeze(1)

        # Multi-layer perceptron over the concatenated embeddings.
        vector_mlp = torch.cat((user_embedding_mlp,
                                item_embedding_mlp + item_bias_mlp.unsqueeze(1)), 1)
        for layer in self.fc_layers:
            vector_mlp = F.relu(layer(vector_mlp))
            vector_mlp = F.dropout(vector_mlp, 0.5, training=self.training)

        # Fuse the two towers and predict one rating per (user, item) pair.
        vector = torch.cat((F.dropout(vector_mf, 0.5, training=self.training),
                            F.dropout(vector_mlp, 0.5, training=self.training)), 1)
        rating = self.output(vector)
        return rating
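

# --- Usage sketch (illustrative; not part of the original gist) ---
# A minimal smoke test: instantiate the model and run one training step on a
# batch of dummy (user, item, rating) triples. The id ranges, batch size,
# learning rate, and the pointwise MSE loss are assumptions for illustration.
if __name__ == "__main__":
    num_users, num_items, batch_size = 1000, 500, 64

    # Embedding tables stay on the CPU by default (use_cuda_embeddings=False);
    # the dense layers live on `device`, and forward() moves the looked-up
    # embeddings across with .to(device).
    model = NeuralMatrixFactorization(num_users, num_items, embedding_dim=32)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # Dummy interaction batch: random ids and ratings in [0, 5).
    user_ids = torch.randint(0, num_users, (batch_size,))
    item_ids = torch.randint(0, num_items, (batch_size,))
    ratings = (torch.rand(batch_size, 1) * 5).to(device)

    model.train()  # model.eval() would disable the dropout layers
    optimizer.zero_grad()
    predictions = model(user_ids, item_ids)  # shape: (batch_size, 1)
    loss = F.mse_loss(predictions, ratings)
    loss.backward()
    optimizer.step()
    print('loss on one dummy batch: {:.4f}'.format(loss.item()))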