This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
class GAN(tf.keras.Model):
    """Adversarial model wrapper pairing a generator with a discriminator.

    Both sub-models are built elsewhere and passed in; this class only wires
    them together with their optimizers, loss and metric for custom training.
    """

    def __init__(self, generator, discriminator, **kwargs):
        super().__init__(**kwargs)
        self.generator = generator
        self.discriminator = discriminator

    def compile(self, generator_optimizer, discriminator_optimizer, loss_fn, metric_fn):
        """Configure the two optimizers plus the shared loss and metric.

        Overrides ``keras.Model.compile``; calls ``super().compile()`` with no
        arguments because training is driven by a custom loop/``train_step``.
        """
        super().compile()
        self.generator_optimizer = generator_optimizer
        self.discriminator_optimizer = discriminator_optimizer
        # NOTE(review): the source was truncated after the line above; storing
        # the two remaining declared arguments is the only sensible
        # continuation — confirm against the original file.
        self.loss_fn = loss_fn
        self.metric_fn = metric_fn
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def predict(input):
    """Greedy-decode one sequence using the module-level ``encoder``/``decoder``.

    NOTE(review): ``input`` shadows the builtin, but renaming it would change
    the public signature for keyword callers, so it is kept. The hidden size
    512 is hard-coded and must match the encoder's LSTM units — confirm.
    """
    hidden = [tf.zeros((1, 512)), tf.zeros((1, 512))]
    _, enc_h, enc_c = encoder(input, hidden)
    enc_states = [enc_h, enc_c]
    result = []
    # Seed decoding with the first token of the input sequence.
    dec_input = tf.expand_dims(input[:, 0], 0)
    for t in range(input.shape[1]):
        dec_output, _, _ = decoder(dec_input, enc_states)
        output_id = tf.math.argmax(dec_output[0], -1)
        # NOTE(review): the source was truncated past this point; standard
        # greedy decoding collects the predicted id and feeds it back as the
        # next decoder input — confirm against the original file.
        result.append(output_id)
        dec_input = tf.expand_dims(output_id, 0)
    return result
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
@tf.function
def train(input, target, enc_hidden):
    """One teacher-forced training step over a batch; returns the step loss.

    Relies on module-level ``encoder``, ``decoder``, ``optimizer`` and
    ``loss_fn``. ``target[:, 0]`` is assumed to be the start-of-sequence
    token (see the ``<sos>`` prefixing in preprocessing) — confirm.
    """
    loss__ = 0.0
    with tf.GradientTape() as tape:
        enc_output, enc_h, enc_c = encoder(input, enc_hidden)
        enc_states = [enc_h, enc_c]
        dec_input = tf.expand_dims(target[:, 0], 1)
        for t in range(1, target.shape[1]):
            dec_output, _, _ = decoder(dec_input, enc_states)
            # NOTE(review): the source was truncated inside this loop; the
            # standard teacher-forcing continuation accumulates the masked
            # loss against the t-th target token and feeds that ground-truth
            # token as the next decoder input — confirm against the original.
            loss__ += loss_fn(target[:, t], dec_output)
            dec_input = tf.expand_dims(target[:, t], 1)
    # NOTE(review): gradient application below is reconstructed boilerplate —
    # confirm the original's variable list and returned quantity.
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss__, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return loss__
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Shared training objects: Adam with default hyperparameters, and a
# per-element (reduction='none') cross-entropy so padding can be masked out.
optimizer = keras.optimizers.Adam()
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')


def loss_fn(true, pred):
    """Masked sparse categorical cross-entropy.

    Positions where ``true`` is 0 (the padding id) contribute zero loss.
    NOTE(review): ``tf.reduce_mean`` still divides by *all* positions,
    including masked ones, so heavily padded batches report a smaller loss;
    this matches the original behavior and is kept as-is.
    """
    mask = tf.math.logical_not(tf.math.equal(true, 0))
    loss_ = loss(true, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
class Decoder(keras.Model):
    """Embedding -> LSTM -> Dense decoder producing per-token vocab logits."""

    def __init__(self, vocab_size=10000, emb_dim=128, units=256, batch_size=64):
        super().__init__()
        self.units = units
        self.batch = batch_size  # NOTE(review): stored but unused in visible code
        self.emb_layer = keras.layers.Embedding(vocab_size, emb_dim)
        self.lstm = keras.layers.LSTM(self.units, return_sequences=True, return_state=True)
        self.fc = keras.layers.Dense(vocab_size)

    def call(self, x, states):
        """Run one decode step; returns (logits, hidden state, cell state).

        NOTE(review): the body was truncated in the source; this mirrors the
        Encoder's embed->LSTM pattern plus the projection layer, and matches
        callers that unpack three values — confirm against the original.
        """
        emb = self.emb_layer(x)
        output, h, c = self.lstm(emb, initial_state=states)
        return self.fc(output), h, c
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
class Encoder(keras.Model):
    """Embedding -> LSTM encoder returning the full sequence and final states."""

    def __init__(self, vocab_size=10000, emb_dim=128, units=256, batch_size=64):
        super().__init__()
        self.units = units
        self.batch = batch_size  # NOTE(review): stored but unused in visible code
        self.emb_layer = keras.layers.Embedding(vocab_size, emb_dim)
        self.lstm = keras.layers.LSTM(self.units, return_sequences=True, return_state=True)

    def call(self, x, states):
        """Encode ``x``; returns (sequence output, hidden state, cell state).

        NOTE(review): the source was truncated after the embedding line;
        callers unpack three values, so the LSTM is run from the supplied
        initial states and all three outputs returned — confirm.
        """
        emb = self.emb_layer(x)
        output, h, c = self.lstm(emb, initial_state=states)
        return output, h, c
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def preprocess_and_tokenize(language, vocab_size, oov_size, is_input=False, is_output=False):
    """Optionally wrap target texts in <sos>/<eos> markers and fit a Keras tokenizer.

    NOTE(review): ``oov_size`` is passed as ``Tokenizer(oov_token=...)``, which
    Keras expects to be a *string* token (e.g. '<unk>'), not a count — this
    looks like a naming/usage bug; confirm against call sites before changing.
    ``is_input`` is unused in the visible code.
    """
    if is_output:
        # Target-side sentences get explicit start/end-of-sequence markers.
        lang = np.array(['<sos> ' + text + ' <eos>' for text in language])
    else:
        lang = language
    tokenizer = keras.preprocessing.text.Tokenizer(vocab_size, oov_token=oov_size)
    tokenizer.fit_on_texts(lang)
    # NOTE(review): the source was truncated here; returning the fitted
    # tokenizer is the minimal sensible continuation — confirm against the
    # original (it may also return the tokenized sequences).
    return tokenizer