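# -----------------------------------------------------------------------
# The BPR graph below is TensorFlow 1.x code and depends on names defined
# elsewhere in the surrounding project: the `users` and `artists` index
# lists, the hyperparameters `num_factors`, `lr`, `lambda_user`,
# `lambda_item` and `lambda_bias`, and the helpers `init_variable` and
# `embed`. The definitions here are a minimal sketch of what they could
# look like, added so the snippet is self-contained; they are
# assumptions, not the original code.
# -----------------------------------------------------------------------
import numpy as np
import tensorflow as tf

graph = tf.Graph()

# Hypothetical id lists; in the original these come from the
# user-artist interaction dataset.
users = list(range(1000))
artists = list(range(5000))

# Hypothetical hyperparameters, chosen for illustration only.
num_factors = 64       # latent dimensions per user/item
lr = 0.005             # Adam learning rate
lambda_user = 1e-6     # regularization strength for user factors
lambda_item = 1e-6     # regularization strength for item factors
lambda_bias = 1e-6     # regularization strength for item biases


def init_variable(size, dim, name=None):
    '''Create a (size x dim) variable with small random values.'''
    std = np.sqrt(2 / dim)
    return tf.Variable(tf.random_normal([size, dim], stddev=std), name=name)


def embed(inputs, size, dim, name=None):
    '''Create an embedding matrix and look up the rows for `inputs`.'''
    emb = init_variable(size, dim, name)
    return tf.nn.embedding_lookup(emb, inputs)
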
with graph.as_default():
    '''
    Loss function:
    -SUM ln σ(xui - xuj) + λ||W1||**2 + λ||W2||**2 + λ||W3||**2 ...

    ln = the natural log.
    σ(xuij) = the sigmoid function of xuij = xui - xuj.
    λ = the lambda regularization value (one per parameter
        group below: user factors, item factors, item biases).
    ||W||**2 = the squared L2 norm of our model parameters.
    '''

    # Inputs to our model: user (u), known item (i),
    # and unknown item (j) triplets.
    u = tf.placeholder(tf.int32, shape=(None, 1))
    i = tf.placeholder(tf.int32, shape=(None, 1))
    j = tf.placeholder(tf.int32, shape=(None, 1))
    # User feature embedding
    u_factors = embed(u, len(users), num_factors, 'user_factors')  # U matrix

    # Known and unknown item embeddings
    item_factors = init_variable(len(artists), num_factors, "item_factors")  # V matrix
    i_factors = tf.nn.embedding_lookup(item_factors, i)
    j_factors = tf.nn.embedding_lookup(item_factors, j)

    # i and j bias embeddings.
    item_bias = init_variable(len(artists), 1, "item_bias")
    i_bias = tf.nn.embedding_lookup(item_bias, i)
    i_bias = tf.reshape(i_bias, [-1, 1])
    j_bias = tf.nn.embedding_lookup(item_bias, j)
    j_bias = tf.reshape(j_bias, [-1, 1])
    # Calculate the dot product + bias for the known and unknown
    # item to get xui and xuj.
    xui = i_bias + tf.reduce_sum(u_factors * i_factors, axis=2)
    xuj = j_bias + tf.reduce_sum(u_factors * j_factors, axis=2)

    # Calculate xuij, the difference between the two scores.
    xuij = xui - xuj

    # Calculate the mean AUC (area under curve):
    # if xuij is greater than 0, then xui is greater
    # than xuj (and that's what we want).
    u_auc = tf.reduce_mean(tf.to_float(xuij > 0))
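    # (tf.to_float was removed in TensorFlow 2;
    # tf.cast(xuij > 0, tf.float32) is the equivalent there.)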
    # Output the AUC value to TensorBoard for monitoring.
    tf.summary.scalar('auc', u_auc)

    # Calculate the squared L2 norm ||W||**2 multiplied by λ.
    l2_norm = tf.add_n([
        lambda_user * tf.reduce_sum(tf.multiply(u_factors, u_factors)),
        lambda_item * tf.reduce_sum(tf.multiply(i_factors, i_factors)),
        lambda_item * tf.reduce_sum(tf.multiply(j_factors, j_factors)),
        lambda_bias * tf.reduce_sum(tf.multiply(i_bias, i_bias)),
        lambda_bias * tf.reduce_sum(tf.multiply(j_bias, j_bias))
    ])
    # Calculate the loss as -ln σ(xuij) + λ||W||**2.
    loss = -tf.reduce_mean(tf.log(tf.sigmoid(xuij))) + l2_norm

    # Train using the Adam optimizer to minimize
    # our loss function.
    opt = tf.train.AdamOptimizer(learning_rate=lr)
    step = opt.minimize(loss)

    # Initialize all TensorFlow variables.
    init = tf.global_variables_initializer()
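
# -----------------------------------------------------------------------
# Usage sketch (an addition, not part of the original gist): run the
# graph in a TF1 session, feeding batches of (u, i, j) triplets. The
# triplets here are sampled uniformly at random purely for illustration;
# a real sampler would draw i from the artists a user has listened to
# and j from the artists they have not.
# -----------------------------------------------------------------------
batch_size = 512
epochs = 10

with tf.Session(graph=graph) as session:
    session.run(init)
    for epoch in range(epochs):
        # Illustrative random triplets, shaped (batch_size, 1)
        # to match the placeholders above.
        batch_u = np.random.randint(len(users), size=(batch_size, 1))
        batch_i = np.random.randint(len(artists), size=(batch_size, 1))
        batch_j = np.random.randint(len(artists), size=(batch_size, 1))
        _, batch_loss, batch_auc = session.run(
            [step, loss, u_auc],
            feed_dict={u: batch_u, i: batch_i, j: batch_j})
        print('epoch %d: loss %.4f, auc %.4f'
              % (epoch, batch_loss, batch_auc))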