@alfredplpl
Last active May 16, 2022 21:57

# Train an unconditional DDPM on MNIST with denoising_diffusion_pytorch.
# Written against the library as of May 2022; later versions changed the Trainer API.
import copy
from pathlib import Path

import torch
from torch.cuda.amp import GradScaler
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms

from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer
def cycle(dl):
    # Turn a finite DataLoader into an endless stream of batches.
    while True:
        for batch in dl:
            yield batch
class EMA():
    """Exponential moving average (EMA) of model weights."""
    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_model_average(self, ma_model, current_model):
        # Blend every EMA parameter toward the corresponding current parameter.
        for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
            old_weight, up_weight = ma_params.data, current_params.data
            ma_params.data = self.update_average(old_weight, up_weight)

    def update_average(self, old, new):
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new
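
# How the EMA is used (a sketch of the 2022-era base Trainer behavior, for
# reference): every `update_ema_every` steps the shadow weights are blended as
#   w_ema <- beta * w_ema + (1 - beta) * w,
# e.g.
#   ema = EMA(0.995)
#   ema.update_model_average(ema_model, model)
# Before `step_start_ema` steps have elapsed, the EMA model is instead reset
# to a straight copy of the current weights.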
class Mydataset(Dataset):
    """Wraps MNIST and rescales images from [0, 1] to [-1, 1]."""
    def __init__(self, mnist_data):
        self.mnist = mnist_data

    def __len__(self):
        return len(self.mnist)

    def __getitem__(self, idx):
        X = self.mnist[idx][0]  # image tensor in [0, 1] after ToTensor()
        X = (X * 2) - 1         # rescale to [-1, 1], the range the diffusion model trains on
        # The label is discarded: this trains an unconditional model.
        return X
class CustomTrainer(Trainer):
    """Trainer that swaps the base class's folder dataset for MNIST.

    train(), checkpointing, and EMA stepping are inherited from the base
    Trainer, so only __init__ is overridden.
    """
    def __init__(
        self,
        diffusion_model,
        ema_decay = 0.995,
        image_size = 32,
        train_batch_size = 32,
        train_lr = 2e-5,
        train_num_steps = 100000,
        gradient_accumulate_every = 2,
        amp = False,
        step_start_ema = 2000,
        update_ema_every = 10,
        save_and_sample_every = 1000,
        results_folder = './results'
    ):
        # Deliberately does not call super().__init__(): the base class would
        # build its own image-folder dataset, which is replaced with MNIST here.
        self.model = diffusion_model
        self.ema = EMA(ema_decay)
        self.ema_model = copy.deepcopy(self.model)
        self.update_ema_every = update_ema_every

        self.step_start_ema = step_start_ema
        self.save_and_sample_every = save_and_sample_every

        self.batch_size = train_batch_size
        self.image_size = diffusion_model.image_size
        self.gradient_accumulate_every = gradient_accumulate_every
        self.train_num_steps = train_num_steps

        # MNIST digits are 28x28; upscale to the model's resolution with
        # nearest-neighbor interpolation.
        mnist_data = datasets.MNIST('.',
                                    transform=transforms.Compose([
                                        transforms.Resize(image_size, transforms.InterpolationMode.NEAREST),
                                        transforms.ToTensor()
                                    ]),
                                    download=True)
        self.ds = Mydataset(mnist_data)
        self.dl = cycle(DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True))
        self.opt = Adam(diffusion_model.parameters(), lr = train_lr)

        self.step = 0

        self.amp = amp
        self.scaler = GradScaler(enabled = amp)

        self.results_folder = Path(results_folder)
        self.results_folder.mkdir(exist_ok = True)

        self.reset_parameters()  # inherited: initializes the EMA model from the current weights
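
# The inherited train() loop (behavior of the 2022-era library this gist
# targets): each step draws batches from self.dl, accumulates gradients over
# `gradient_accumulate_every` batches, updates the EMA model every
# `update_ema_every` steps, and every `save_and_sample_every` steps writes a
# checkpoint and a grid of samples into `results_folder`.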
# U-Net backbone: base width 32, five resolution levels (32 -> 2 at the
# bottleneck), single grayscale channel.
model = Unet(
    dim = 32,
    dim_mults = (1, 2, 4, 8, 16),
    channels = 1
).cuda()

diffusion = GaussianDiffusion(
    model,
    image_size = 32,
    timesteps = 1000,    # number of diffusion steps
    loss_type = 'l1',    # L1 denoising loss
    channels = 1
).cuda()
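
# For reference, calling the diffusion model on a batch returns the training
# loss directly (per the library's README), e.g. with a hypothetical batch:
#   images = torch.rand(8, 1, 32, 32).cuda() * 2 - 1  # values in [-1, 1]
#   loss = diffusion(images)
#   loss.backward()
# The trainer below does exactly this with MNIST batches.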
trainer = CustomTrainer(
    diffusion,
    train_batch_size = 256,
    train_lr = 2e-5,
    train_num_steps = 700000,          # total training steps
    gradient_accumulate_every = 2,     # gradient accumulation steps
    ema_decay = 0.995,                 # exponential moving average decay
    save_and_sample_every = 70         # checkpoint/sample interval, in steps
)

trainer.train()
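
# After training, samples can be drawn from the EMA model. A minimal sketch,
# assuming the 2022-era API in which GaussianDiffusion.sample(batch_size=...)
# returns a batch of generated images:
from torchvision.utils import save_image

samples = trainer.ema_model.sample(batch_size = 16)
# Depending on the library version, samples may be in [-1, 1]; rescale to
# [0, 1] before saving if so.
save_image(samples, 'mnist_samples.png', nrow = 4)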