from tensorflow.keras import Model
from tensorflow.keras.layers import Dense


class Transformer(Model):
    def __init__(self, num_layers, num_neurons, num_hidden_neurons, num_heads,
                 input_vocabulary_size, target_vocabulary_size):
        super(Transformer, self).__init__()
        # Encoder and Decoder are the stacks defined earlier.
        self.encoder = Encoder(num_neurons, num_hidden_neurons, num_heads,
                               input_vocabulary_size, num_layers)
        self.decoder = Decoder(num_neurons, num_hidden_neurons, num_heads,
                               target_vocabulary_size, num_layers)
        # Final linear layer maps decoder output to logits over the target vocabulary.
        self.linear_layer = Dense(target_vocabulary_size)

    def call(self, transformer_input, tar, training, encoder_padding_mask,
             look_ahead_mask, decoder_padding_mask):
        # Encode the input sequence, then decode while attending to the encoder output.
        encoder_output = self.encoder(transformer_input, training, encoder_padding_mask)
        decoder_output, attention_weights = self.decoder(
            tar, encoder_output, training, look_ahead_mask, decoder_padding_mask)
        output = self.linear_layer(decoder_output)
        return output, attention_weights
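For a quick sanity check, the model can be driven end to end with random token ids. The sketch below is illustrative only: the hyperparameter values are made up, and the create_padding_mask / create_look_ahead_mask helpers are assumptions following the standard TensorFlow Transformer tutorial convention (a 1.0 in a mask marks a position to hide); if mask helpers are defined elsewhere in the code, use those instead.

import tensorflow as tf

def create_padding_mask(sequence):
    # Assumed helper: 1.0 where the token id is 0 (padding).
    # Shape (batch, 1, 1, seq_len) so it broadcasts over attention logits.
    mask = tf.cast(tf.math.equal(sequence, 0), tf.float32)
    return mask[:, tf.newaxis, tf.newaxis, :]

def create_look_ahead_mask(size):
    # Assumed helper: upper-triangular mask hiding future positions from the decoder.
    return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)

# Illustrative hyperparameters, not values taken from the original snippet.
transformer = Transformer(num_layers=2, num_neurons=128, num_hidden_neurons=512,
                          num_heads=8, input_vocabulary_size=8500,
                          target_vocabulary_size=8000)

inp = tf.random.uniform((64, 38), minval=1, maxval=8500, dtype=tf.int64)
tar = tf.random.uniform((64, 36), minval=1, maxval=8000, dtype=tf.int64)

encoder_padding_mask = create_padding_mask(inp)
decoder_padding_mask = create_padding_mask(inp)  # masks encoder output inside the decoder
look_ahead_mask = tf.maximum(create_look_ahead_mask(tf.shape(tar)[1]),
                             create_padding_mask(tar))

logits, attention_weights = transformer(inp, tar, training=False,
                                        encoder_padding_mask=encoder_padding_mask,
                                        look_ahead_mask=look_ahead_mask,
                                        decoder_padding_mask=decoder_padding_mask)
print(logits.shape)  # (64, 36, 8000): per-token logits over the target vocabulary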