
# Install TensorFlow for macOS from https://github.com/apple/tensorflow_macos
import argparse

import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.python.compiler.mlcompute import mlcompute


def get_dataset(hparams):
  # the preview truncates here; CIFAR-10 is an assumed choice of dataset
  (train_images, train_labels), (test_images, test_labels) = \
      datasets.cifar10.load_data()
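The mlcompute import above is what the apple/tensorflow_macos fork uses to pin the graph to a particular ML Compute backend, and it has to be called before any ops are built. A minimal sketch of wiring that to a command-line flag (the --device flag name and the main() wrapper are assumptions, not part of the original script):

def main():
  parser = argparse.ArgumentParser()
  # hypothetical flag; tensorflow_macos accepts 'cpu', 'gpu' or 'any'
  parser.add_argument('--device', default='any', choices=['cpu', 'gpu', 'any'])
  args = parser.parse_args()

  # select the ML Compute device before constructing the model
  mlcompute.set_mlc_device(device_name=args.device)


if __name__ == '__main__':
  main()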
def evaluate(sentence):
  sentence = preprocess_sentence(sentence)
  sentence = tf.expand_dims(
      START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)
  output = tf.expand_dims(START_TOKEN, 0)
  for i in range(MAX_LENGTH):
    predictions = model(inputs=[sentence, output], training=False)
    # assumed continuation (the preview stops above): greedily decode the last position
    predictions = predictions[:, -1:, :]
    predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
    if tf.equal(predicted_id, END_TOKEN[0]):
      break
    output = tf.concat([output, predicted_id], axis=-1)
  return tf.squeeze(output, axis=0)
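A thin wrapper is a convenient way to exercise the decoding loop above. This sketch assumes tokenizer is the same subword encoder used in evaluate and simply decodes the returned ids back to text; it is an illustration, not part of the original excerpt:

def predict(sentence):
  prediction = evaluate(sentence)
  # keep only ids inside the subword vocabulary (drops the start/end tokens)
  predicted_sentence = tokenizer.decode(
      [i for i in prediction if i < tokenizer.vocab_size])
  print('Input: {}'.format(sentence))
  print('Output: {}'.format(predicted_sentence))
  return predicted_sentence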
def loss_function(y_true, y_pred):
  y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
  # per-token cross-entropy, kept unreduced so padding can be masked out
  loss = tf.keras.losses.SparseCategoricalCrossentropy(
      from_logits=True, reduction='none')(y_true, y_pred)
  # zero out the loss wherever the target token is padding (id 0)
  mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
  loss = tf.multiply(loss, mask)
  return tf.reduce_mean(loss)
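Because the loss zeroes out padded positions, a companion metric that ignores padding gives a less distorted picture during training. Below is a sketch of such a masked accuracy under the same conventions (targets reshaped to MAX_LENGTH - 1, padding id 0); it is an illustration, not necessarily the metric used in the original code:

def accuracy(y_true, y_pred):
  # reshape targets to the (batch, MAX_LENGTH - 1) layout used by the loss
  y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
  predicted = tf.cast(tf.argmax(y_pred, axis=-1), y_true.dtype)
  match = tf.cast(tf.equal(y_true, predicted), tf.float32)
  # count only positions whose target token is not padding (id 0)
  mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
  return tf.reduce_sum(match * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)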
# model hyperparameters
NUM_LAYERS = 2
D_MODEL = 256
NUM_HEADS = 8
UNITS = 512
DROPOUT = 0.1

model = transformer(
    vocab_size=VOCAB_SIZE,
    num_layers=NUM_LAYERS,
    units=UNITS,
    d_model=D_MODEL,
    num_heads=NUM_HEADS,
    dropout=DROPOUT)
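For completeness, one way to train this model against the masked loss (and the sketched accuracy metric) is shown below. The plain Adam optimizer and the dataset pipeline are assumptions; the original setup may well use a warm-up learning-rate schedule instead:

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
    loss=loss_function,
    metrics=[accuracy])

# `dataset` is assumed to yield ({'inputs': ..., 'dec_inputs': ...}, targets) batches
model.fit(dataset, epochs=20)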
def transformer(vocab_size,
                num_layers,
                units,
                d_model,
                num_heads,
                dropout,
                name="transformer"):
  # token ids for the encoder and the (shifted) decoder inputs
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  dec_inputs = tf.keras.Input(shape=(None,), name="dec_inputs")
  # (preview ends here; the rest of the function presumably builds the padding
  # and look-ahead masks, runs encoder() and decoder(), and projects the
  # decoder outputs to vocab_size logits)
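The encoder and decoder defined below expect a padding_mask shaped (batch, 1, 1, seq_len) and a look_ahead_mask shaped (batch, 1, seq_len, seq_len). transformer() typically derives both from the raw token ids; here is a sketch of those helpers, assuming padding id 0 and the convention that 1 marks a position to be suppressed:

def create_padding_mask(x):
  # 1.0 wherever the token is padding, broadcastable over attention logits
  mask = tf.cast(tf.math.equal(x, 0), tf.float32)
  return mask[:, tf.newaxis, tf.newaxis, :]  # (batch, 1, 1, seq_len)


def create_look_ahead_mask(x):
  # combine the causal (upper-triangular) mask with the padding mask
  seq_len = tf.shape(x)[1]
  look_ahead = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
  padding = create_padding_mask(x)  # (batch, 1, 1, seq_len)
  return tf.maximum(look_ahead, padding)  # (batch, 1, seq_len, seq_len)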
def decoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name='decoder'):
  inputs = tf.keras.Input(shape=(None,), name='inputs')
  enc_outputs = tf.keras.Input(shape=(None, d_model), name='encoder_outputs')
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name='look_ahead_mask')
  padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')
  # (preview ends here; embedding, positional encoding and num_layers
  # decoder_layer blocks follow)

def decoder_layer(units, d_model, num_heads, dropout, name="decoder_layer"):
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name="look_ahead_mask")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')

  # masked self-attention over the decoder inputs
  attention1 = MultiHeadAttention(
      d_model, num_heads, name="attention_1")(inputs={
          'query': inputs,
          'key': inputs,
          'value': inputs,
          'mask': look_ahead_mask
      })
  # (preview ends here; a second, cross-attention block over enc_outputs with
  # padding_mask follows, then the feed-forward sub-block)

def encoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name="encoder"):
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
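The preview stops after the two inputs. In this functional-API style the body typically embeds the token ids, scales them by sqrt(d_model), adds positional encodings and dropout, and then chains num_layers encoder_layer blocks that share the padding mask. A sketch of that chaining step only, where embeddings stands in for the embedded-and-encoded inputs that are not shown in the excerpt:

  # inside encoder(), after `embeddings` of shape (batch, seq_len, d_model) is built
  outputs = embeddings
  for i in range(num_layers):
    outputs = encoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name="encoder_layer_{}".format(i),
    )([outputs, padding_mask])
  # the function would then return
  # tf.keras.Model(inputs=[inputs, padding_mask], outputs=outputs, name=name)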
def encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")

  # self-attention over the encoder inputs, with padded positions masked out
  attention = MultiHeadAttention(
      d_model, num_heads, name="attention")({
          'query': inputs,
          'key': inputs,
          'value': inputs,
          'mask': padding_mask
      })
  # (preview ends here; dropout, residual add & layer normalization and the
  # position-wise feed-forward sub-block follow)
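MultiHeadAttention, used throughout these layers, is a custom layer that does not appear in the excerpts; note that its call signature (a dict with query/key/value/mask) differs from the built-in tf.keras.layers.MultiHeadAttention. Each head ultimately applies scaled dot-product attention; below is a minimal sketch of that computation, assuming the mask convention above (1 marks positions to suppress):

def scaled_dot_product_attention(query, key, value, mask=None):
  # attention(Q, K, V) = softmax(Q K^T / sqrt(depth)) V
  matmul_qk = tf.matmul(query, key, transpose_b=True)
  depth = tf.cast(tf.shape(key)[-1], tf.float32)
  logits = matmul_qk / tf.math.sqrt(depth)
  if mask is not None:
    # push masked positions towards -inf so they get ~0 weight after the softmax
    logits += (mask * -1e9)
  attention_weights = tf.nn.softmax(logits, axis=-1)
  return tf.matmul(attention_weights, value)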