Created
June 12, 2020 06:47
-
-
Save leehanchung/8da991bf1264c19324920349171386bc to your computer and use it in GitHub Desktop.
Tensorflow Text Classification Tutorial
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import tensorflow_datasets as tfds | |
import tensorflow as tf | |
import matplotlib.pyplot as plt | |
def plot_graphs(history, metric):
    """Plot a training metric alongside its validation counterpart.

    Args:
        history: Keras ``History`` object returned by ``model.fit``; its
            ``history`` dict maps metric names to per-epoch value lists.
        metric: Name of the metric to plot (e.g. ``'accuracy'`` or ``'loss'``).
    """
    val_metric = 'val_' + metric
    train_curve = history.history[metric]
    val_curve = history.history[val_metric]
    plt.plot(train_curve)
    plt.plot(val_curve, '')
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, val_metric])
    plt.show()
# Download the IMDB movie-review dataset variant that ships with a pre-built
# subword text encoder (~8k vocabulary). `as_supervised=True` makes the
# dataset yield (text, label) tuples instead of feature dicts.
dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True,
                          as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']

# The dataset metadata carries the tokenizer used to encode the reviews.
encoder = info.features['text'].encoder

print('Vocabulary size: {}'.format(encoder.vocab_size))

# Sanity-check the encoder: encode then decode must round-trip losslessly
# (verified by the assert below).
sample_string = 'Hello TensorFlow.'

encoded_string = encoder.encode(sample_string)
print('Encoded string is {}'.format(encoded_string))

original_string = encoder.decode(encoded_string)
print('The original string: "{}"'.format(original_string))

assert original_string == sample_string

# Show which subword each individual token id maps back to.
for index in encoded_string:
    print('{} ----> {}'.format(index, encoder.decode([index])))
# Shuffle buffer size (elements held in memory for shuffling) and batch size.
BUFFER_SIZE = 10000
BATCH_SIZE = 64

# Shuffle only the training set, then batch. `padded_batch` zero-pads the
# variable-length encoded reviews within each batch; with no explicit
# padded_shapes argument, each batch is padded to its own longest sequence.
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.padded_batch(BATCH_SIZE)

test_dataset = test_dataset.padded_batch(BATCH_SIZE)

# Peek at a few batches to inspect the padded shapes; the second dimension
# (padded sequence length) is expected to vary from batch to batch.
for example_batch, label_batch in train_dataset.take(20):
    print("Batch shape:", example_batch.shape)
    print("label shape:", label_batch.shape)
# Embedding -> bidirectional LSTM -> dense classifier head. The final
# Dense(1) layer has no activation, so it emits a raw logit — matched by
# `from_logits=True` in the loss below.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(encoder.vocab_size, 64),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1)
])

# Binary cross-entropy on logits; Adam with a small learning rate (1e-4).
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['accuracy'])

# Train for 10 epochs; validation runs on only 30 batches of the test set
# per epoch to keep epoch time down.
history = model.fit(train_dataset, epochs=10,
                    validation_data=test_dataset,
                    validation_steps=30)

# Final evaluation over the full (batched) test set.
test_loss, test_acc = model.evaluate(test_dataset)

print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.