Yash Sonar (Yash-567)
Pune, Maharashtra, India

Yash-567 / scrape-reddit.js
Created July 14, 2019 02:51 — forked from andrewjmead/scrape-reddit.js
Request & Cheerio Scraping Example
var request = require('request');
var cheerio = require('cheerio');

request('https://www.reddit.com/', function (error, response, body) {
  var $ = cheerio.load(body);

  $('a.title').each(function (i, elem) {
    console.log('');
    console.log('** Link **');
    console.log($(this).text());
  });
});

K = 10                            # number of latent features
N = len(set(df.userId.values))    # number of users
M = len(set(df.movieId.values))   # number of movies

from torch import nn
import torch
import torch.nn.functional as F

class Network(nn.Module):
    ...   # class body not shown in this fragment; a possible definition is sketched below

model = Network()
model.double()   # use double precision, matching the float64 values coming from pandas
model
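
The gist never shows the Network class itself. A minimal sketch of what it could look like, assuming it mirrors the Keras model further down (K-dimensional user and movie embeddings, concatenated and fed through a 400-unit hidden layer); the layer names and sizes are illustrative assumptions, not the author's code:

class Network(nn.Module):
    def __init__(self, n_users=N, n_movies=M, k=K):
        super().__init__()
        self.u_emb = nn.Embedding(n_users, k)    # one k-dim vector per user
        self.m_emb = nn.Embedding(n_movies, k)   # one k-dim vector per movie
        self.fc = nn.Linear(2 * k, 400)          # hidden layer on the concatenated embeddings
        self.out = nn.Linear(400, 1)             # predicted rating

    def forward(self, users, movies):
        x = torch.cat([self.u_emb(users), self.m_emb(movies)], dim=1)
        return self.out(F.relu(self.fc(x)))
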
from torch import optim
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr = 0.08, momentum=0.9)

import random
from tqdm import tqdm

train_losses, test_losses = [], []
for e in range(epochs):
    running_loss = 0
    for i in tqdm(range(0, len(df_train), 128)):   # mini-batches of 128 rows
        train = df_train[i:i+128]
        optimizer.zero_grad()   # the rest of the training step is sketched below
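
The loop body stops at optimizer.zero_grad() in the gist. One way the remainder of a training step could look, assuming the Network sketched above and the userId / movieId / rating columns used in these snippets (the details here are assumptions, not the author's code):

        users = torch.tensor(train.userId.values, dtype=torch.long)      # embedding lookups need integer ids
        movies = torch.tensor(train.movieId.values, dtype=torch.long)
        ratings = torch.tensor(train.rating.values, dtype=torch.double)  # double, to match model.double()
        preds = model(users, movies).squeeze()
        loss = criterion(preds, ratings)   # MSE between predicted and true ratings
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    train_losses.append(running_loss)      # per-epoch training loss; a test-set pass would fill test_losses
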

# keras model
from keras.layers import Input, Embedding, Flatten, Concatenate, Dense
from keras.models import Model

u = Input(shape=(1,))
m = Input(shape=(1,))
u_embedding = Embedding(N, K)(u)   # (N, 1, K)
m_embedding = Embedding(M, K)(m)   # (N, 1, K)
u_embedding = Flatten()(u_embedding)   # (N, K)
m_embedding = Flatten()(m_embedding)   # (N, K)
x = Concatenate()([u_embedding, m_embedding])   # (N, 2K)
x = Dense(400)(x)
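
The fragment stops at the 400-unit hidden layer. A minimal, assumed way to finish the graph so that the compile/fit calls below have a complete model to train (the single output predicts the mean-centred rating, matching y = rating - mu below):

x = Dense(1)(x)                            # predicted (rating - mu)
model = Model(inputs=[u, m], outputs=x)    # two inputs: user id and movie id
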

from sklearn.utils import shuffle
from torch import nn
import torch
import torch.nn.functional as F

N = df.userId.max() + 1     # number of users
M = df.movie_idx.max() + 1  # number of movies

# split into train and test (the split itself isn't shown; see the sketch below)
df = shuffle(df, random_state=12)
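
The snippet shuffles the DataFrame but the actual split is not shown, and mu (used below to centre the ratings) is never defined in these fragments. A minimal sketch, assuming an 80/20 split and mu as the mean training rating:

cutoff = int(0.8 * len(df))     # assumption: 80% train / 20% test
df_train = df.iloc[:cutoff]
df_test = df.iloc[cutoff:]
mu = df_train.rating.mean()     # assumption: global mean rating, subtracted from the targets below
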
import keras
from keras.optimizers import SGD

model.compile(
    loss='mse',
    optimizer=SGD(lr=0.08, momentum=0.9),
    metrics=['mse'],
)
mc = keras.callbacks.ModelCheckpoint('weights{epoch:01d}.h5', period=1)   # save weights after every epoch

r = model.fit(
    x=[df_train.userId.values, df_train.movie_idx.values],
    y=df_train.rating.values - mu,    # train on mean-centred ratings
    epochs=epochs,
    batch_size=128,
    validation_data=(
        [df_test.userId.values, df_test.movie_idx.values],
        df_test.rating.values - mu
    ),
    callbacks=[mc],   # presumably the checkpoint callback defined above
)
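
r is a Keras History object whose history dict holds the per-epoch losses, which is handy for checking convergence. An optional way to plot them:

import matplotlib.pyplot as plt

plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='test loss')
plt.legend()
plt.show()
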

import numpy as np

# expose the movie-embedding layer as its own model
model_2 = Model(input=model.get_layer('input_2').input, output=model.get_layer('embedding_2').output)

user_movie_embeddings = model_2.predict(x=usrc)   # usrc = array of movie ids liked by the new user
user_movie_embeddings = user_movie_embeddings.reshape(len(usrc), 10)   # (number of liked movies, K=10)
user_movie_bias = np.array([5 for temp in range(len(usrc))])           # assume the user would rate each liked movie 5
user_embedding, residuals, rank, s = np.linalg.lstsq(user_movie_embeddings, user_movie_bias, rcond=-1)   # solve for the new user's embedding
user_embedding = user_embedding.reshape(1, 10)   # embedding for the new user, based on the movies they liked
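
Not shown in the gist, but a natural next step is to score every movie against the recovered user embedding and recommend the best-scoring ones. A sketch, assuming 'embedding_2' is the movie-embedding layer as above and a simple dot-product score:

movie_embeddings = model.get_layer('embedding_2').get_weights()[0]   # (M, K) trained movie vectors
scores = movie_embeddings.dot(user_embedding.ravel())                # one score per movie
top_movies = np.argsort(-scores)[:10]                                # indices of the 10 best-scoring movies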