-
-
Save qwertpi/8b28bb260db8d290c20e9e8562f9d9f1 to your computer and use it in GitHub Desktop.
Keras code that always predicts class 0 (the output Dense layer uses a ReLU activation where a sigmoid is required)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
[[32849     0]
 [ 7215     0]]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from sklearn.metrics import confusion_matrix | |
from keras.preprocessing.text import Tokenizer | |
from keras.preprocessing.sequence import pad_sequences | |
from keras.models import load_model | |
from sklearn.model_selection import train_test_split | |
def load(file):
    """Read a pipe-delimited dataset file and return (texts, labels).

    Each line has the form ``text|label``: the text is lower-cased and the
    label is parsed as an int.

    Parameters
    ----------
    file : str
        Path to the UTF-8 encoded dataset file.

    Returns
    -------
    tuple[list[str], list[int]]
        Parallel lists of texts (X) and integer labels (Y).
    """
    X = []
    Y = []
    with open(file, encoding="utf-8") as f:
        # Iterate the file lazily instead of materializing readlines(),
        # and split each line once instead of twice.
        for line in f:
            parts = line.lower().strip("\n").split("|")
            X.append(parts[0])
            Y.append(int(parts[1]))
    return X, Y
# --- Evaluate the saved model over the whole dataset ---

# Restore the trained network from disk.
model = load_model('spaces.h5')

# Raw texts and their integer labels.
texts, labels = load("formatted.csv")

# Character-level tokenizer, re-fit on the same corpus used for training.
tokenizer = Tokenizer(filters='', char_level=True)
tokenizer.fit_on_texts(texts)

# Encode every text as a fixed-length (1200) sequence of character ids.
encoded = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=1200)

# Threshold the model's scores at 0.5 and report the confusion matrix.
predictions = model.predict(encoded) > 0.5
print(confusion_matrix(labels, predictions))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.preprocessing.text import Tokenizer | |
from keras.preprocessing.sequence import pad_sequences | |
from sklearn.model_selection import train_test_split | |
import numpy as np | |
from keras import callbacks | |
def load(file):
    """Read a pipe-delimited dataset file and return (texts, labels).

    Each line has the form ``text|label``: the text is lower-cased and the
    label is parsed as an int.

    Parameters
    ----------
    file : str
        Path to the UTF-8 encoded dataset file.

    Returns
    -------
    tuple[list[str], list[int]]
        Parallel lists of texts (X) and integer labels (Y).
    """
    X = []
    Y = []
    with open(file, encoding="utf-8") as f:
        # Iterate the file lazily instead of materializing readlines(),
        # and split each line once instead of twice.
        for line in f:
            parts = line.lower().strip("\n").split("|")
            X.append(parts[0])
            Y.append(int(parts[1]))
    return X, Y
# ---- Data preparation ----
X, Y = load("formatted.csv")

# Character-level tokenizer: every character gets its own integer id.
T_2 = Tokenizer(filters='', char_level=True)
T_2.fit_on_texts(X)
X = T_2.texts_to_sequences(X)
# Pad/truncate every sequence to a fixed length of 1200 characters.
X = pad_sequences(X, maxlen=1200)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

# ---- Model ----
model = Sequential()
model.add(Dense(256, input_dim=1200, activation='sigmoid'))
model.add(Dense(128, activation='sigmoid'))
model.add(Dense(26, activation='sigmoid'))
# BUG FIX: the output layer previously used activation='relu'.  A ReLU
# output can die at exactly 0, which makes binary_crossentropy ill-defined
# (log(0)) and collapses the model into always predicting class 0 -- the
# confusion matrix above shows every sample in the 0 column.  A binary
# classifier trained with binary_crossentropy needs a sigmoid output so
# predictions lie in (0, 1).
model.add(Dense(1, activation='sigmoid'))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])

# Stop training once val_loss has not improved for 5 consecutive epochs.
earlystop = callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0,
                                    patience=5,
                                    verbose=0, mode='auto')
callbacks_list = [earlystop]
model.fit(x_train, y_train, epochs=100, batch_size=20,
          validation_data=(x_test, y_test), callbacks=callbacks_list)

# NOTE(review): this evaluates on the FULL dataset (train + test), so the
# reported accuracy is optimistic; evaluating on (x_test, y_test) alone
# would be a fairer estimate.
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

# Persist the model and the fitted tokenizer for later inference.
model.save('model.h5')
import pickle
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(T_2, handle, protocol=pickle.HIGHEST_PROTOCOL)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment