@oscar-defelice
Last active June 3, 2020 07:50
from tensorflow.keras.layers import Input, Embedding, Dense, Flatten
from tensorflow.keras.models import Model

# ScoreLayer and TripletLossLayer are custom layers assumed to be defined
# elsewhere in the project (a minimal sketch is given below).

def build_model(n_users, n_items, emb_dim=30):
    '''
    Define the Keras models for training and inference.

    Parameters
    ----------
    n_users : int
        number of users
    n_items : int
        number of items
    emb_dim : int
        dimension of the embedding space

    Returns
    -------
    network_train : Model
        model outputting the triplet loss, used for training
    network_predict : Model
        model outputting the positive recommendation score, used for inference
    '''
    # Number of categorical features describing each user and each item
    n_user_features = 3
    n_item_features = 18

    ### Input layers
    user_input = Input((n_user_features,), name='user_input')
    positive_item_input = Input((n_item_features,), name='pos_item_input')
    negative_item_input = Input((n_item_features,), name='neg_item_input')
    inputs = [user_input, positive_item_input, negative_item_input]

    ### Embedding layers
    user_emb = Embedding(n_users, emb_dim, input_length=n_user_features, name='user_emb')
    # Positive and negative items share the same embedding
    item_emb = Embedding(n_items, emb_dim, input_length=n_item_features, name='item_emb')

    # Layers to project the item embedding vectors into the same space as the user vector
    vec_conv32 = Dense(32, name='dense_vec32', activation='relu')
    vec_conv = Dense(emb_dim, name='dense_vec', activation='softmax')

    # Anchor (user)
    a = Flatten(name='flatten_usr_emb')(user_emb(user_input))
    a = Dense(emb_dim, name='dense_user', activation='softmax')(a)

    # Positive item
    p = Flatten(name='flatten_pos_emb')(item_emb(positive_item_input))
    p = vec_conv32(p)
    p = vec_conv(p)

    # Negative item
    n = Flatten(name='flatten_neg_emb')(item_emb(negative_item_input))
    n = vec_conv32(n)
    n = vec_conv(n)

    # Score layers (the negative score is kept for symmetry; it is not used in the returned models)
    p_rec_score = ScoreLayer(name='pos_recommendation_score')([a, p])
    n_rec_score = ScoreLayer(name='neg_recommendation_score')([a, n])

    # Triplet-loss layer
    loss_layer = TripletLossLayer(name='triplet_loss_layer')([a, p, n])

    # Connect the inputs with the outputs
    network_train = Model(inputs=inputs, outputs=loss_layer, name='training_model')
    network_predict = Model(inputs=inputs[:-1], outputs=p_rec_score, name='inference_model')

    # Return both models
    return network_train, network_predict
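
ScoreLayer and TripletLossLayer are custom layers that the gist does not define. Below is a minimal sketch of plausible implementations, assuming a dot-product recommendation score and a standard margin-based triplet loss on squared Euclidean distances; the margin value and the add_loss pattern are assumptions, not taken from the original.

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer

class ScoreLayer(Layer):
    # Dot product between the user vector and the item vector (assumed scoring rule).
    def call(self, inputs):
        user_vec, item_vec = inputs
        return K.sum(user_vec * item_vec, axis=-1, keepdims=True)

class TripletLossLayer(Layer):
    # Margin-based triplet loss on squared Euclidean distances (margin value is an assumption).
    def __init__(self, margin=0.5, **kwargs):
        super().__init__(**kwargs)
        self.margin = margin

    def call(self, inputs):
        a, p, n = inputs
        pos_dist = K.sum(K.square(a - p), axis=-1)
        neg_dist = K.sum(K.square(a - n), axis=-1)
        loss = K.maximum(pos_dist - neg_dist + self.margin, 0.0)
        # Register the loss on the model so training can run without an explicit target
        self.add_loss(K.mean(loss))
        return loss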
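
A hypothetical usage sketch: the array names, shapes, and vocabulary sizes below are assumptions (integer-encoded feature matrices with 3 user features and 18 item features per row), shown only to illustrate how the two returned models are meant to be used.

import numpy as np

# Hypothetical toy data: batches of integer-encoded categorical features.
user_feats = np.random.randint(0, 1000, size=(256, 3))
pos_item_feats = np.random.randint(0, 500, size=(256, 18))
neg_item_feats = np.random.randint(0, 500, size=(256, 18))

train_model, predict_model = build_model(n_users=1000, n_items=500)

# The triplet loss is added inside TripletLossLayer via add_loss, so no target is passed to fit.
train_model.compile(optimizer='adam')
train_model.fit([user_feats, pos_item_feats, neg_item_feats], epochs=5, batch_size=64)

# Inference: the prediction model shares layers (and thus trained weights) with the training model.
scores = predict_model.predict([user_feats, pos_item_feats])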