@raghavgurbaxani
Created June 25, 2018 19:43
Embedding Layer Float 16
'''Trains an LSTM model on the IMDB sentiment classification task,
with the Keras backend float type set to float16.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
# Notes
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''
from __future__ import print_function
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
from keras.callbacks import CSVLogger
import os
from keras import backend as K
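# Setting the backend float type before any layer is built makes Keras
# create every weight, including the Embedding matrix, in float16.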
keras.backend.set_floatx('float16')
print(K.floatx())
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
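# Optional sanity check: get_weights() returns NumPy arrays, so this
# should print float16 for the embedding matrix if the backend float
# type took effect.
print('Embedding weight dtype:', model.layers[0].get_weights()[0].dtype)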
# try using different optimizers and different optimizer configs;
# epsilon is raised to 1e-4 here, since the Adam default (~1e-7) is too
# small to have any effect in half precision
optimizer = keras.optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999,
                                  epsilon=1e-4, decay=0.0, amsgrad=False)
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
print('Train...')
# CSVLogger appends to this file; the train_logs/ directory must exist
os.makedirs('train_logs', exist_ok=True)
csv_logger = CSVLogger('train_logs/train_log_imdb_lstm.csv', append=True, separator=',')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test),
          callbacks=[csv_logger])
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
# save model architecture and weights to HDF5; the models/ directory must exist
os.makedirs('models', exist_ok=True)
model.save('models/model_imdb_lstm.h5')
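# Note (assumption): if this model is reloaded later with
# keras.models.load_model(), call keras.backend.set_floatx('float16')
# first, so the rebuilt graph matches the half-precision weights
# stored in the HDF5 file.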