import tensorflow as tf
from tensorflow import keras

# Bidirectional LSTM text classifier: embeds token ids, reads the sequence in
# both directions, and emits 5 class logits (no softmax on the final layer).
# vocab_size and maxlen are assumed to be defined earlier in the gist.
embedding_dim = 20
model_lstm_bi = keras.Sequential([
    keras.layers.Embedding(vocab_size, embedding_dim, input_length=maxlen),
    keras.layers.Bidirectional(keras.layers.LSTM(embedding_dim)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(5),
])
model_lstm_bi.summary()
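# Minimal compile/train sketch (an assumption, not part of the gist): since the
# final Dense(5) layer outputs raw logits, pair it with a from_logits loss.
# `train_ds` is a hypothetical tf.data dataset of (token_ids, label) pairs.
model_lstm_bi.compile(
    optimizer='adam',
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)
# model_lstm_bi.fit(train_ds, epochs=5)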
# Plain feed-forward binary classifier. An explicit Input layer is added so
# the otherwise-unused input_size variable defines the model's input shape.
input_size = 15
hidden_layer_size = 50

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(input_size,)),
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
    tf.keras.layers.Dense(2, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
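# Hedged usage sketch: a sigmoid output pairs with binary_crossentropy.
# X_train and y_train are placeholders for the gist's actual data.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=10, batch_size=32)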
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Transfer-learning text classifier: a pretrained TF-Hub embedding layer
# followed by a small dense head for binary classification.
model = Sequential()
model.add(hub_layer)
model.add(Dense(16, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
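# hub_layer is not defined in the snippet; one common way it might have been
# built (an assumption, using a real TF-Hub text embedding) is:
import tensorflow_hub as hub
hub_layer = hub.KerasLayer(
    "https://tfhub.dev/google/nnlm-en-dim50/2",  # 50-dim sentence embedding
    input_shape=[], dtype=tf.string, trainable=True)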
from tensorflow.keras.layers import Conv2D, Activation, MaxPooling2D

# Small image CNN: three 5x5 conv blocks with aggressive 4x4 max pooling,
# downsampling 256x256 RGB inputs. The snippet ends mid-model after the
# third Conv2D.
model = Sequential()
model.add(Conv2D(16, (5, 5), input_shape=(256, 256, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Conv2D(32, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Conv2D(64, (5, 5)))
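# The gist cuts off here; a typical (assumed) completion adds a final
# activation/pooling step and a small classification head:
from tensorflow.keras.layers import Flatten
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))  # hypothetical binary output
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])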
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

def generate_text_seq(model, tokenizer, text_seq_length, seed_text, n_words):
    # Generate n_words by repeatedly predicting the next word and feeding it back.
    text = []
    for _ in range(n_words):
        # Encode the current seed and left-truncate/pad to the model's input length.
        encoded = tokenizer.texts_to_sequences([seed_text])[0]
        encoded = pad_sequences([encoded], maxlen=text_seq_length, truncating='pre')
        # predict_classes() was removed in TF 2.6; take the argmax of the output instead.
        y_predict = np.argmax(model.predict(encoded), axis=-1)[0]
        # Map the predicted index back to its word.
        predicted_word = ''
        for word, index in tokenizer.word_index.items():
            if index == y_predict:
                predicted_word = word
                break
        seed_text += ' ' + predicted_word
        text.append(predicted_word)
    return ' '.join(text)
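# Hedged usage sketch: generate 20 words from a hypothetical seed, assuming
# the model below was trained on 50-token input windows.
# print(generate_text_seq(model, tokenizer, 50, "deep learning models can", 20))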
from tensorflow.keras.layers import Embedding, LSTM

# Next-word prediction model: 50-dim embeddings over 50-token windows,
# two stacked LSTMs, and a softmax over the whole vocabulary.
model = Sequential()
model.add(Embedding(vocab_size, 50, input_length=50))
model.add(LSTM(100, return_sequences=True))  # pass the full sequence to the next LSTM
model.add(LSTM(100))
model.add(Dense(100, activation="relu"))
model.add(Dense(vocab_size, activation="softmax"))
model.summary()
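# Minimal training sketch (assumed): with integer next-word targets, use
# sparse_categorical_crossentropy. X is (n_samples, 50) token ids and y the
# index of the following word; both are built further below.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# model.fit(X, y, batch_size=256, epochs=100)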
import string

def clean(doc):
    # Tokenize on whitespace, strip punctuation, keep lowercase alphabetic words.
    tokens = doc.split()
    table = str.maketrans("", "", string.punctuation)
    tokens = [w.translate(table) for w in tokens]
    tokens = [word for word in tokens if word.isalpha()]
    tokens = [word.lower() for word in tokens]
    return tokens

tokens = clean(data)
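# Hedged sketch of the rest of the pipeline (variable names are assumptions):
# slice the cleaned tokens into overlapping 51-token windows, where the first
# 50 tokens are the input and the last is the next-word target, matching the
# Embedding(vocab_size, 50, input_length=50) model above.
from tensorflow.keras.preprocessing.text import Tokenizer

length = 50 + 1
lines = [' '.join(tokens[i - length:i]) for i in range(length, len(tokens))]

tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
sequences = np.array(tokenizer.texts_to_sequences(lines))
X, y = sequences[:, :-1], sequences[:, -1]
vocab_size = len(tokenizer.word_index) + 1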