Udith Haputhanthri (udithhaputhanthri)
@udithhaputhanthri
udithhaputhanthri / train.py
Created March 26, 2021 08:25
WGAN_scripts
# Instantiate the critic and generator and move them to the training device
C = Critic(img_channels, hidden_C).to(device)
G = Generator(noise_channels, img_channels, hidden_G).to(device)
# C = C.apply(init_weights)
# G = G.apply(init_weights)

# Log gradients and parameters to Weights & Biases every 10 steps
wandb.watch(G, log='all', log_freq=10)
wandb.watch(C, log='all', log_freq=10)

# Adam with beta1 = 0.5, the usual choice for GAN training
opt_C = torch.optim.Adam(C.parameters(), lr=lr, betas=(0.5, 0.999))
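The preview cuts off before the generator optimizer and the training loop. As a minimal sketch (not the gist's actual code), one WGAN-GP iteration could look like this, reusing the loss helpers from the losses.py gist below and a gradient_penalty helper like the one sketched there; opt_G, dataloader, noise_channels and c_lambda are assumed names.

opt_G = torch.optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))

for real, _ in dataloader:  # assumes an (image, label) dataloader, e.g. ImageFolder
    real = real.to(device)
    noise = torch.randn(real.size(0), noise_channels, 1, 1, device=device)
    fake = G(noise)

    # Critic step (real setups often run several critic steps per generator step)
    opt_C.zero_grad()
    epsilon = torch.rand(real.size(0), 1, 1, 1, device=device, requires_grad=True)
    gradient = get_gradient(C, real, fake.detach(), epsilon)
    gp = gradient_penalty(gradient)  # sketched after losses.py below
    crit_loss = get_crit_loss(C(fake.detach()), C(real), gp, c_lambda)
    crit_loss.backward()
    opt_C.step()

    # Generator step: push the critic's score on fakes up
    opt_G.zero_grad()
    gen_loss = get_gen_loss(C(fake))
    gen_loss.backward()
    opt_G.step()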
@udithhaputhanthri
udithhaputhanthri / losses.py
Created March 26, 2021 08:24
WGAN_scripts
def get_gen_loss(crit_fake_pred):
    # Generator wants the critic to score its fakes as high as possible
    gen_loss = -torch.mean(crit_fake_pred)
    return gen_loss

def get_crit_loss(crit_fake_pred, crit_real_pred, gradient_penalty, c_lambda):
    # WGAN-GP critic loss: fake score minus real score plus the weighted gradient penalty
    crit_loss = torch.mean(crit_fake_pred) - torch.mean(crit_real_pred) + c_lambda * gradient_penalty
    return crit_loss

def get_gradient(crit, real_imgs, fake_imgs, epsilon):
    # Interpolate real and fake images, then take the critic's gradient w.r.t. the mix
    mixed_imgs = real_imgs * epsilon + fake_imgs * (1 - epsilon)
    mixed_scores = crit(mixed_imgs)
    gradient = torch.autograd.grad(outputs=mixed_scores,
                                   inputs=mixed_imgs,
                                   grad_outputs=torch.ones_like(mixed_scores),
                                   create_graph=True,
                                   retain_graph=True)[0]
    return gradient
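get_crit_loss expects a precomputed gradient_penalty scalar, but the helper that turns get_gradient's output into it is not in the preview. A minimal sketch of the standard WGAN-GP penalty (mean squared deviation of the per-sample gradient norms from 1), assuming the gist follows the usual formulation:

def gradient_penalty(gradient):
    # Flatten each sample's gradient and penalize its L2 norm for deviating from 1
    gradient = gradient.view(gradient.size(0), -1)
    gradient_norm = gradient.norm(2, dim=1)
    return torch.mean((gradient_norm - 1) ** 2)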
class Generator(nn.Module):
    def __init__(self, noise_channels, img_channels, hidden_G):
        super(Generator, self).__init__()
        # Upsample a (noise_channels, 1, 1) noise tensor to a 64x64 image
        self.G = nn.Sequential(
            conv_trans_block(noise_channels, hidden_G*16, kernel_size=4, stride=1, padding=0),
            conv_trans_block(hidden_G*16, hidden_G*8),
            conv_trans_block(hidden_G*8, hidden_G*4),
            conv_trans_block(hidden_G*4, hidden_G*2),
            nn.ConvTranspose2d(hidden_G*2, img_channels, kernel_size=4, stride=2, padding=1),
            nn.Tanh())

    def forward(self, x):
        return self.G(x)
class conv_trans_block(nn.Module):
    # Transposed convolution -> BatchNorm -> ReLU, the generator's basic upsampling unit
    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1):
        super(conv_trans_block, self).__init__()
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    def forward(self, x):
        return self.block(x)
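train.py above instantiates Critic(img_channels, hidden_C), but the critic is not shown in the preview. A hypothetical sketch of a matching DCGAN-style critic for 64x64 inputs (not the gist's implementation); InstanceNorm is used here because WGAN-GP critics usually avoid BatchNorm.

class Critic(nn.Module):
    def __init__(self, img_channels, hidden_C):
        super(Critic, self).__init__()
        def block(in_c, out_c):
            # Strided conv -> InstanceNorm -> LeakyReLU, halving spatial resolution
            return nn.Sequential(
                nn.Conv2d(in_c, out_c, kernel_size=4, stride=2, padding=1),
                nn.InstanceNorm2d(out_c),
                nn.LeakyReLU(0.2))
        self.C = nn.Sequential(
            nn.Conv2d(img_channels, hidden_C, kernel_size=4, stride=2, padding=1),  # 64 -> 32
            nn.LeakyReLU(0.2),
            block(hidden_C, hidden_C*2),    # 32 -> 16
            block(hidden_C*2, hidden_C*4),  # 16 -> 8
            block(hidden_C*4, hidden_C*8),  # 8 -> 4
            nn.Conv2d(hidden_C*8, 1, kernel_size=4, stride=1, padding=0))  # 4 -> 1, unbounded score

    def forward(self, x):
        return self.C(x)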
import zipfile
import os

# Download and extract the CelebA dataset (Colab/Jupyter shell command below)
if not os.path.isfile('celeba.zip'):
    !mkdir data_faces && wget https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip
    with zipfile.ZipFile("celeba.zip", "r") as zip_ref:
        zip_ref.extractall("data_faces/")

from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
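    # Hypothetical continuation (the preview truncates here): typical CelebA
    # preprocessing for a 64x64 generator with Tanh output, not the gist's values.
    transforms.Resize(64),
    transforms.CenterCrop(64),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Assumed setup, not shown in the preview: ImageFolder over the extracted directory.
from torchvision import datasets
dataset = datasets.ImageFolder('data_faces', transform=transform)
dataloader = DataLoader(dataset, batch_size=128, shuffle=True)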
@udithhaputhanthri
udithhaputhanthri / evaluate_generator.py
Last active June 1, 2020 09:59
Image-to-Image Translation Using Conditional DCGANs
# Load the trained generator weights and switch to evaluation mode
G = Generator()
filepath = 'gdrive/My Drive/pix2pixmodel/G_L1.pth'
G.load_state_dict(torch.load(filepath, map_location=torch.device('cpu')))
G.eval()

for x, y in dataloader:
    # Sample a noise map and translate the first 5 inputs of the batch
    z = torch.randn((batch_size, 1, 128, 128)).to(device)
    generated_imgs = G(x[:5], z[:5])
    real_imgs = x[:5]
    # Stack generated outputs, inputs, and ground-truth targets for visual comparison
    imgs = torch.cat([generated_imgs, real_imgs, y[:5]], 0).data.cpu()
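The preview stops at the concatenation. A minimal follow-up sketch (not part of the gist) that lays the 15 images out as a comparison grid with torchvision:

from torchvision.utils import make_grid, save_image

# 5 generated, 5 inputs, 5 targets -> one row each with nrow=5
grid = make_grid(imgs, nrow=5, normalize=True)
save_image(grid, 'comparison.png')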
@udithhaputhanthri
udithhaputhanthri / support_blocks.py
Last active May 29, 2020 18:08
Image-to-Image Translation Using Conditional DCGANs
class conv_block(nn.Module):
    # Strided convolution -> LeakyReLU -> BatchNorm, the basic downsampling unit
    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1):
        super(conv_block, self).__init__()
        self.conv_block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        return self.conv_block(x)
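A quick illustrative shape check (not part of the gist): with the default arguments a conv_block halves the spatial resolution.

block = conv_block(3, 64)
x = torch.randn(1, 3, 128, 128)
print(block(x).shape)  # torch.Size([1, 64, 64, 64])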
@udithhaputhanthri
udithhaputhanthri / create_dataloader.py
Last active May 30, 2020 03:59
Image-to-Image Translation Using Conditional DCGANs
class get_dataset(torch.utils.data.Dataset):
    def __init__(self, name='edges2shoes', type_='train', transform=None):
        self.dir_ = name + '/' + name + '/' + type_
        self.img_list = sorted(os.listdir(self.dir_))
        self.transform = transform

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        # Each edges2shoes image stores the edge map and the photo side by side;
        # the left half is the input x
        both = plt.imread(self.dir_ + '/' + self.img_list[idx]).astype('uint8')
        x = both[:, :both.shape[1]//2, :]
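        # Hypothetical continuation (the preview truncates here): the right half of the
        # pair would be the target photo y, with the same transform applied to both halves.
        y = both[:, both.shape[1]//2:, :]
        if self.transform:
            x = self.transform(x)
            y = self.transform(y)
        return x, y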
@udithhaputhanthri
udithhaputhanthri / train.py
Last active May 31, 2020 19:59
Image-to-Image Translation Using Conditional DCGANs
# G(x, z), D(x, y)
factor = 1
G.train()
D.train()

for epoch in range(50):
    for i, (x, y) in enumerate(dataloader):
        opt_D.zero_grad()
        opt_G.zero_grad()
        x = x.to(device)
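        # Hypothetical continuation, not the gist's code: a standard conditional-GAN + L1
        # update; lambda_L1 is an assumed name and D is assumed to return logits.
        y = y.to(device)
        z = torch.randn((x.size(0), 1, 128, 128), device=device)
        fake = G(x, z)

        # Discriminator step: real pairs (x, y) vs. generated pairs (x, G(x, z))
        d_real = D(x, y)
        d_fake = D(x, fake.detach())
        loss_D = torch.nn.functional.binary_cross_entropy_with_logits(d_real, torch.ones_like(d_real)) + \
                 torch.nn.functional.binary_cross_entropy_with_logits(d_fake, torch.zeros_like(d_fake))
        loss_D.backward()
        opt_D.step()

        # Generator step: fool D and stay close to the target with a weighted L1 term
        opt_G.zero_grad()
        d_fake = D(x, fake)
        loss_G = torch.nn.functional.binary_cross_entropy_with_logits(d_fake, torch.ones_like(d_fake)) + \
                 lambda_L1 * torch.nn.functional.l1_loss(fake, y)
        loss_G.backward()
        opt_G.step()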