Shivam Sharma (Shivam-316)
Shivam-316 / GAN_Architecture.py
Created May 9, 2022 17:11
GAN architecture for image-to-image translation.
import tensorflow as tf

class GAN(tf.keras.Model):
    def __init__(self, generator, discriminator, **kwargs):
        super(GAN, self).__init__(**kwargs)
        self.generator = generator
        self.discriminator = discriminator

    def compile(self, generator_optimizer, discriminator_optimizer, loss_fn, metric_fn):
        super(GAN, self).compile()
        self.generator_optimizer = generator_optimizer
        self.discriminator_optimizer = discriminator_optimizer
        self.loss_fn = loss_fn        # store the loss/metric for use in train_step
        self.metric_fn = metric_fn
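
The gist ends before a custom training step. Below is a minimal sketch of what one could look like for this class, assuming a pix2pix-style setup: paired (input, target) image batches, a two-input discriminator, and a loss_fn that returns a (generator_loss, discriminator_loss) pair. None of these assumptions are confirmed by the original gist.

class GANWithStep(GAN):  # hypothetical subclass, not part of the gist
    def train_step(self, data):
        input_image, target_image = data
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            fake_image = self.generator(input_image, training=True)
            real_logits = self.discriminator([input_image, target_image], training=True)
            fake_logits = self.discriminator([input_image, fake_image], training=True)
            # Assumed contract: loss_fn returns (generator_loss, discriminator_loss).
            gen_loss, disc_loss = self.loss_fn(real_logits, fake_logits,
                                               fake_image, target_image)
        gen_grads = gen_tape.gradient(gen_loss, self.generator.trainable_variables)
        disc_grads = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.generator_optimizer.apply_gradients(
            zip(gen_grads, self.generator.trainable_variables))
        self.discriminator_optimizer.apply_gradients(
            zip(disc_grads, self.discriminator.trainable_variables))
        return {"gen_loss": gen_loss, "disc_loss": disc_loss}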
import tensorflow as tf

def predict(input):
    # Greedy decoding: encode the source, then feed the decoder its own
    # previous prediction one step at a time.
    hidden = [tf.zeros((1, 512)), tf.zeros((1, 512))]  # must match encoder units
    _, enc_h, enc_c = encoder(input, hidden)
    enc_states = [enc_h, enc_c]
    result = []
    dec_input = tf.expand_dims(input[:, 0], 0)
    for t in range(input.shape[1]):
        dec_output, dec_h, dec_c = decoder(dec_input, enc_states)
        output_id = tf.math.argmax(dec_output[0], -1)
        result.append(int(output_id.numpy()[0]))
        dec_input = tf.expand_dims(output_id, 0)   # feed the prediction back in
        enc_states = [dec_h, dec_c]
    return result
@tf.function
def train(input, target, enc_hidden):
    loss__ = 0.0
    with tf.GradientTape() as tape:
        enc_output, enc_h, enc_c = encoder(input, enc_hidden)
        enc_states = [enc_h, enc_c]
        dec_input = tf.expand_dims(target[:, 0], 1)
        for t in range(1, target.shape[1]):
            dec_output, dec_h, dec_c = decoder(dec_input, enc_states)
            loss__ += loss_fn(target[:, t], dec_output[:, 0, :])
            dec_input = tf.expand_dims(target[:, t], 1)  # teacher forcing
            enc_states = [dec_h, dec_c]
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss__, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return loss__ / int(target.shape[1])
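
For context, a minimal driver loop around train might look like the sketch below. The dataset, batch size (64) and unit count (256, the constructors' defaults) are assumptions; note that the optimizer and loss_fn used by train are defined in the next snippet.

# Hypothetical epoch loop -- assumes a tf.data.Dataset named `dataset`
# yielding (input_batch, target_batch) pairs.
EPOCHS = 10
for epoch in range(EPOCHS):
    enc_hidden = [tf.zeros((64, 256)), tf.zeros((64, 256))]   # initial [h, c]
    total_loss = 0.0
    for batch, (inp, targ) in enumerate(dataset):
        total_loss += train(inp, targ, enc_hidden)
    print(f'epoch {epoch + 1}: loss {total_loss / (batch + 1):.4f}')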
from tensorflow import keras

optimizer = keras.optimizers.Adam()
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

def loss_fn(true, pred):
    # Mask padding (token id 0) so padded steps don't contribute to the loss.
    mask = tf.math.logical_not(tf.math.equal(true, 0))
    loss_ = loss(true, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
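
A quick sanity check of the masking behaviour (the shapes and vocabulary size here are illustrative assumptions):

true = tf.constant([[4, 7, 0, 0]])          # token ids, 0 = padding
pred = tf.random.normal((1, 4, 10000))      # decoder logits over the vocab
print(loss_fn(true, pred))                  # padded positions contribute zero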
class Decoder(keras.Model):
    def __init__(self, vocab_size=10000, emb_dim=128, units=256, batch_size=64):
        super(Decoder, self).__init__()
        self.units = units
        self.batch = batch_size
        self.emb_layer = keras.layers.Embedding(vocab_size, emb_dim)
        self.lstm = keras.layers.LSTM(self.units, return_sequences=True, return_state=True)
        self.fc = keras.layers.Dense(vocab_size)

    def call(self, x, states):
        # Embed the target tokens, run the LSTM from the given states,
        # and project to vocabulary logits.
        emb = self.emb_layer(x)
        output, h, c = self.lstm(emb, initial_state=states)
        return self.fc(output), h, c
class Encoder(keras.Model):
    def __init__(self, vocab_size=10000, emb_dim=128, units=256, batch_size=64):
        super(Encoder, self).__init__()
        self.units = units
        self.batch = batch_size
        self.emb_layer = keras.layers.Embedding(vocab_size, emb_dim)
        self.lstm = keras.layers.LSTM(self.units, return_sequences=True, return_state=True)

    def call(self, x, states):
        emb = self.emb_layer(x)
        # Run the LSTM over the embedded sequence; return outputs plus final states.
        output, h, c = self.lstm(emb, initial_state=states)
        return output, h, c
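
To show how the pieces fit together, here is a small smoke test wiring the encoder and decoder for one decoding step. The token ids are invented; the 256-unit state size matches the constructors' defaults.

encoder = Encoder()
decoder = Decoder()

src = tf.constant([[12, 45, 3, 0]])                 # a padded source sequence
init = [tf.zeros((1, 256)), tf.zeros((1, 256))]     # initial LSTM [h, c]
enc_out, enc_h, enc_c = encoder(src, init)

dec_in = tf.constant([[1]])                         # e.g. an <sos> token id
logits, dec_h, dec_c = decoder(dec_in, [enc_h, enc_c])
print(logits.shape)                                 # (1, 1, 10000) vocab logits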
Shivam-316 / preprocess_and_tokenize.py
Created November 8, 2020 08:44
This function does the required preprocessing and tokenizes text into padded integer sequences.
import numpy as np
from tensorflow import keras

def preprocess_and_tokenize(language, vocab_size, oov_token, is_input=False, is_output=False):
    # Wrap target-side text in start/end markers so the decoder knows where
    # sequences begin and end. (is_input is unused; kept for compatibility.)
    if is_output:
        lang = []
        for text in language:
            lang.append('<sos> ' + text + ' <eos>')
        lang = np.array(lang)
    else:
        lang = language
    # Renamed oov_size -> oov_token: Keras expects the OOV *token string* here,
    # and the Tokenizer's first positional argument is num_words.
    tokenizer = keras.preprocessing.text.Tokenizer(vocab_size, oov_token=oov_token)
    tokenizer.fit_on_texts(lang)
    # Convert to padded integer sequences.
    tensor = keras.preprocessing.sequence.pad_sequences(
        tokenizer.texts_to_sequences(lang), padding='post')
    return tensor, tokenizer
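
A small usage sketch (the sample sentences are invented for illustration; note that the Tokenizer's default filters strip punctuation, so '<sos>'/'<eos>' end up indexed as 'sos'/'eos'):

sentences = np.array(['how are you', 'see you soon'])
tensor, tokenizer = preprocess_and_tokenize(sentences, 10000, '<unk>', is_output=True)
print(tensor)                                   # padded integer sequences
print(tokenizer.word_index['sos'], tokenizer.word_index['eos'])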