import tensorflow as tf
from tensorflow.keras.layers import Layer, Embedding, Dropout


class PreProcessingLayer(Layer):
    def __init__(self, num_neurons, vocabulary_size):
        super(PreProcessingLayer, self).__init__()
        self.num_neurons = num_neurons

        # Token embeddings and pre-computed positional encoding.
        self.embedding = Embedding(vocabulary_size, self.num_neurons)
        positional_encoding_handler = PositionalEncoding(vocabulary_size, self.num_neurons)
        self.positional_encoding = positional_encoding_handler.get_positional_encoding()

        # Dropout applied to the summed embeddings.
        self.dropout = Dropout(0.1)

    def call(self, sequence, training, mask):
        sequence_length = tf.shape(sequence)[1]

        # Embed tokens and scale by sqrt(d_model), as in "Attention Is All You Need".
        sequence = self.embedding(sequence)
        sequence *= tf.math.sqrt(tf.cast(self.num_neurons, tf.float32))

        # Add positional information, truncated to the current sequence length.
        sequence += self.positional_encoding[:, :sequence_length, :]

        sequence = self.dropout(sequence, training=training)
        return sequence
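
# The snippet references a PositionalEncoding class that is not defined here.
# Below is a minimal, hypothetical sketch of such a helper (standard sinusoidal
# encoding) plus a usage example, assuming an even num_neurons and that the
# vocabulary size doubles as the maximum sequence length, as the constructor
# call above implies. The real class in the original project may differ.

import numpy as np


class PositionalEncoding:
    def __init__(self, max_position, num_neurons):
        # Sinusoidal encoding: even indices get sin, odd indices get cos.
        position = np.arange(max_position)[:, np.newaxis]
        div_term = np.power(10000.0, np.arange(0, num_neurons, 2) / num_neurons)
        encoding = np.zeros((max_position, num_neurons))
        encoding[:, 0::2] = np.sin(position / div_term)
        encoding[:, 1::2] = np.cos(position / div_term)
        # Add a leading batch dimension so the table broadcasts over batches.
        self._encoding = tf.cast(encoding[np.newaxis, ...], tf.float32)

    def get_positional_encoding(self):
        return self._encoding


# Example usage (illustrative values):
# layer = PreProcessingLayer(num_neurons=512, vocabulary_size=8000)
# tokens = tf.constant([[5, 42, 7, 0]])            # shape (batch, seq_len)
# out = layer(tokens, training=False, mask=None)   # shape (1, 4, 512)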