@lettergram
Last active January 4, 2019 15:36
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, Conv1D, GlobalMaxPooling1D
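
# --- Hyperparameters (example values assumed for illustration; not part of
# --- the original gist -- tune these for your own dataset)
max_words = 10000       # vocabulary size used by the Embedding layer
maxlen = 500            # every input sequence is padded/truncated to this length
embedding_dims = 50     # dimensionality of the learned word embeddings
filters = 250           # number of Conv1D filters
kernel_size = 5         # width (in tokens) of each convolutional filter
hidden_dims = 150       # units in the fully connected layer
num_classes = 2         # number of output categories
batch_size = 32
epochs = 3

# Assumed data preparation (x_* are lists of word-index sequences and y_* are
# integer class labels from your own dataset; shown here as a sketch):
# x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
# x_test  = sequence.pad_sequences(x_test,  maxlen=maxlen)
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test  = keras.utils.to_categorical(y_test,  num_classes)
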
model = Sequential()
# Create the embedding (input) layer: maps each of max_words token indices
# to an embedding_dims-dimensional vector
model.add(Embedding(max_words, embedding_dims, input_length=maxlen))
model.add(Dropout(0.2)) # randomly zero 20% of embedding outputs during training
# Create the convolutional layer
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
# Create the pooling layer
model.add(GlobalMaxPooling1D())
# Create the fully connected layer
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# Create the output layer (num_classes)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
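# Optional check (not in the original gist): print a layer-by-layer summary
# to verify output shapes and parameter counts before training
model.summary()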
# Compile the model: specify the loss function, optimizer, and evaluation metric
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
# "Fit the model" (train model), using training data (80% of datset)
model.fit(x_train, y_train, batch_size=batch_size,
epochs=epochs, validation_data=(x_test, y_test))
# Evaluate the trained model on the held-out test data (20% of the dataset)
score = model.evaluate(x_test, y_test, batch_size=batch_size)
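# score is [loss, accuracy], since metrics=['accuracy'] was set at compile time
print('Test loss:', score[0])
print('Test accuracy:', score[1])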