import tensorflow as tf
import numpy as np
import pandas as pd
import math
import heapq
from tqdm import tqdm
# Load and prepare our data.
uids, iids, df_train, df_test, df_neg, users, items, item_lookup = load_dataset()
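
# Note: load_dataset() is defined elsewhere in the gist/article. The code
# below only assumes that uids and iids are parallel arrays of 0-indexed
# user and item indices for the observed (positive) interactions, and that
# users and items are the lists of unique users and items (their lengths
# size the embedding tables below). df_train, df_test, df_neg and
# item_lookup are used by the data-preparation and evaluation steps and are
# not referenced in this snippet.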
#-------------
# HYPERPARAMS
#-------------
num_neg = 4
epochs = 20
batch_size = 256
learning_rate = 0.001
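
# The training loop needs (user, item, label) triples: each observed
# interaction becomes a positive example (label 1) paired with num_neg
# randomly sampled unobserved items as negatives (label 0). Below is a
# minimal sketch of such a sampler; the original article defines its own
# helper, so treat the name and exact signature as assumptions.
def get_train_instances(uids, iids, num_neg, num_items):
    """Pair every positive (user, item) with num_neg sampled negatives."""
    user_input, item_input, labels = [], [], []
    observed = set(zip(uids, iids))  # fast membership test for positives
    for u, i in zip(uids, iids):
        # The observed interaction itself is a positive example.
        user_input.append(u)
        item_input.append(i)
        labels.append(1)
        # Draw num_neg negative items uniformly from the unobserved ones.
        for _ in range(num_neg):
            j = np.random.randint(num_items)
            while (u, j) in observed:
                j = np.random.randint(num_items)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)
    return user_input, item_input, labels
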
#-------------------------
# TENSORFLOW GRAPH
#-------------------------
# Set up our TensorFlow graph.
graph = tf.Graph()
with graph.as_default():
    # Define input placeholders for user, item and label.
    user = tf.placeholder(tf.int32, shape=(None, 1))
    item = tf.placeholder(tf.int32, shape=(None, 1))
    label = tf.placeholder(tf.int32, shape=(None, 1))

    # User feature embedding
    u_var = tf.Variable(tf.random_normal([len(users), 32], stddev=0.05), name='user_embedding')
    user_embedding = tf.nn.embedding_lookup(u_var, user)

    # Item feature embedding
    i_var = tf.Variable(tf.random_normal([len(items), 32], stddev=0.05), name='item_embedding')
    item_embedding = tf.nn.embedding_lookup(i_var, item)

    # Flatten our user and item embeddings.
    user_embedding = tf.keras.layers.Flatten()(user_embedding)
    item_embedding = tf.keras.layers.Flatten()(item_embedding)

    # Concatenate our two embedding vectors together
    concatenated = tf.keras.layers.concatenate([user_embedding, item_embedding])
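    # concatenated now has shape (batch_size, 64): the 32-dimensional user
    # embedding and the 32-dimensional item embedding side by side.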
    # Add a first dropout layer.
    dropout = tf.keras.layers.Dropout(0.2)(concatenated)

    # Below we add our four hidden layers along with batch
    # normalization and dropouts. We use relu as the activation function.
    layer_1 = tf.keras.layers.Dense(64, activation='relu', name='layer1')(dropout)
    batch_norm1 = tf.keras.layers.BatchNormalization(name='batch_norm1')(layer_1)
    dropout1 = tf.keras.layers.Dropout(0.2, name='dropout1')(batch_norm1)
    layer_2 = tf.keras.layers.Dense(32, activation='relu', name='layer2')(dropout1)
    batch_norm2 = tf.keras.layers.BatchNormalization(name='batch_norm2')(layer_2)
    dropout2 = tf.keras.layers.Dropout(0.2, name='dropout2')(batch_norm2)
    layer_3 = tf.keras.layers.Dense(16, activation='relu', name='layer3')(dropout2)
    layer_4 = tf.keras.layers.Dense(8, activation='relu', name='layer4')(layer_3)

    # Our final single neuron output layer.
    output_layer = tf.keras.layers.Dense(1,
                                         kernel_initializer="lecun_uniform",
                                         name='output_layer')(layer_4)

    # Define our loss function as binary cross entropy.
    labels = tf.cast(label, tf.float32)
    logits = output_layer
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels,
        logits=logits))
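
    # For inference/evaluation it is handy to also expose the predicted
    # interaction probability. This op is an assumed addition (not used by
    # the training step below); it simply applies a sigmoid to the logits.
    prediction = tf.sigmoid(output_layer)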

    # Train using the Adam optimizer to minimize our loss.
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    step = opt.minimize(loss)

    # Initialize all TensorFlow variables.
    init = tf.global_variables_initializer()
session = tf.Session(config=None, graph=graph)
session.run(init)
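
#-------------------------
# TRAINING LOOP (sketch)
#-------------------------
# A minimal training loop, assuming the get_train_instances helper sketched
# above; the original article's loop differs in details, and the hit-rate
# evaluation (which presumably uses the imported heapq and math) is omitted
# here. Each epoch we resample negatives, shuffle, and feed (user, item,
# label) batches into the placeholders, running one optimizer step per batch.
for epoch in range(epochs):
    user_input, item_input, label_input = get_train_instances(
        uids, iids, num_neg, len(items))

    # Shuffle the training triples and split them into mini-batches.
    triples = list(zip(user_input, item_input, label_input))
    np.random.shuffle(triples)

    epoch_losses = []
    for start in tqdm(range(0, len(triples), batch_size)):
        batch = triples[start:start + batch_size]
        u_batch = np.array([[u] for u, _, _ in batch])
        i_batch = np.array([[i] for _, i, _ in batch])
        l_batch = np.array([[l] for _, _, l in batch])

        batch_loss, _ = session.run([loss, step],
                                    feed_dict={user: u_batch,
                                               item: i_batch,
                                               label: l_batch})
        epoch_losses.append(batch_loss)

    print('Epoch %d: mean loss %.4f' % (epoch, np.mean(epoch_losses)))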