Factorization machine (FM) in Keras. Gist by @eggie5, created September 27, 2018.
import itertools

import numpy as np
from keras.layers import Input, Embedding, Flatten, Activation, Add, Dot
from keras.models import Model
from keras.regularizers import l2 as l2_reg


def build_model(max_features, K=8, solver='adam', l2=0.0, l2_fm=0.0):
    """Build a factorization machine over len(max_features) categorical
    fields, where max_features[c] is the vocabulary size of field c."""
    inputs = []
    flatten_layers = []
    columns = range(len(max_features))

    # One integer input and one K-dimensional factor embedding per field.
    for c in columns:
        inputs_c = Input(shape=(1,), dtype='int32', name='input_%s' % c)
        num_c = max_features[c]
        embed_c = Embedding(num_c, K, input_length=1, name='embed_%s' % c,
                            embeddings_regularizer=l2_reg(l2_fm))(inputs_c)
        flatten_c = Flatten()(embed_c)
        inputs.append(inputs_c)
        flatten_layers.append(flatten_c)

    # Second-order FM terms: the dot product of every pair of field factors.
    fm_layers = []
    for emb1, emb2 in itertools.combinations(flatten_layers, 2):
        dot_layer = Dot(axes=1)([emb1, emb2])
        fm_layers.append(dot_layer)

    # First-order terms: a scalar bias embedding per field.
    for c in columns:
        num_c = max_features[c]
        embed_c = Embedding(num_c, 1, input_length=1, name='bias_%s' % c,
                            embeddings_regularizer=l2_reg(l2))(inputs[c])
        flatten_c = Flatten()(embed_c)
        fm_layers.append(flatten_c)

    # Sum all first- and second-order terms, then squash to a probability.
    flatten = Add()(fm_layers)
    outputs = Activation('sigmoid', name='outputs')(flatten)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=solver, loss='binary_crossentropy')
    return model
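
# For reference: with per-field bias embeddings w_c and K-dimensional factor
# embeddings v_c, the network above computes the standard second-order
# factorization machine score
#
#   \hat{y}(x) = \sigma\Big( \sum_c w_c[x_c] + \sum_{c < c'} \langle v_c[x_c], v_{c'}[x_{c'}] \rangle \Big)
#
# where x_c is the integer index fed to field c's input: the first sum is the
# flattened bias embeddings and the second is the pairwise Dot layers. (Note
# there is no global intercept w_0 in this implementation.)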
if __name__ == '__main__':
    # Toy example: a two-field FM (users x items), i.e. matrix factorization
    # with per-user and per-item biases.
    num_users = 100
    num_items = 100
    k = 10

    mdl = build_model([num_users, num_items], K=k)
    mdl.summary()
    # from keras.utils import plot_model
    # plot_model(mdl, to_file="fm.png")

    # Score two (user, item) pairs: (1, 1) and (2, 2).
    print(mdl.predict([np.array([1, 2]), np.array([1, 2])]))
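
    # A minimal training sketch under assumed synthetic data: `users`, `items`,
    # and `labels` below are hypothetical random arrays that only illustrate
    # the expected input shapes; real usage would substitute observed
    # interactions and their binary outcomes.
    users = np.random.randint(0, num_users, size=1000)   # field 0: user ids
    items = np.random.randint(0, num_items, size=1000)   # field 1: item ids
    labels = np.random.randint(0, 2, size=1000).astype('float32')
    mdl.fit([users, items], labels, epochs=2, batch_size=32)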