Skip to content

Instantly share code, notes, and snippets.

@enric1994
Created April 19, 2020 22:11
Show Gist options
  • Save enric1994/96c87ddec5a1cc16db3e1a4d003f5f63 to your computer and use it in GitHub Desktop.
Save enric1994/96c87ddec5a1cc16db3e1a4d003f5f63 to your computer and use it in GitHub Desktop.
Pytorch boilerplate
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
from blendercam_loader import ClassifierDataset, DatasetFromSubset
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
# ---- Experiment configuration ----
experiment = '1.0.1'   # experiment identifier; used in log and checkpoint paths
batch_size = 4
num_workers = 8
num_epochs = 100
step_size_up = 10      # CyclicLR half-cycle length, in scheduler.step() calls
max_lr = 1e-2          # upper bound of the cyclic learning-rate schedule
base_lr = 1e-3         # lower bound (also the optimizer's initial LR)

# Init logs: one TensorBoard run directory per experiment.
writer = SummaryWriter('/classifier/logs/' + experiment)

# Seed experiment for reproducibility. Forcing deterministic cuDNN
# algorithms (and disabling benchmark autotuning) trades some speed
# for run-to-run repeatability.
random_seed = 40
np.random.seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(random_seed)
# Also seed every CUDA device explicitly; on older torch versions
# torch.manual_seed only covered the CPU/current-device RNG. This is a
# no-op when no GPU is present.
torch.cuda.manual_seed_all(random_seed)
# Per-phase image preprocessing. Training adds stochastic augmentation
# (rotation, color jitter, horizontal flip); validation runs only the
# deterministic resize / tensor / normalize pipeline.
_train_pipeline = transforms.Compose([
    transforms.Resize(size=84),
    transforms.RandomRotation(degrees=15),
    transforms.ColorJitter(),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
_val_pipeline = transforms.Compose([
    transforms.Resize(size=84),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
image_transforms = {
    'train': _train_pipeline,
    'val': _val_pipeline,
}
# Load data
print('Loading data...')
# ClassifierDataset reads rendered input/output pairs from disk.
dataset = ClassifierDataset(input_path='/datasets/classifier50k/input', output_path='/datasets/classifier50k/output')
# Split and apply transforms
# 80/20 train/val split. random_split draws from torch's global RNG, so
# the split is reproducible thanks to the torch.manual_seed call above.
train_size = int(len(dataset) * 0.8)
train_set, val_set = random_split(dataset, [train_size, len(dataset) - train_size])
# DatasetFromSubset wraps each Subset with its own transform, letting the
# two splits of the same underlying dataset use different pipelines.
train_dataset = DatasetFromSubset(
train_set, transform=image_transforms['train']
)
val_dataset = DatasetFromSubset(
val_set, transform=image_transforms['val']
)
# Dataloader
# NOTE(review): shuffle=True on the validation loader is unconventional;
# it does not change the aggregated metrics below, but confirm it is
# intentional.
dataloaders = {
'train': DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers),
'val': DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
}
# CAUTION: len(DataLoader) is the number of *batches*, not samples. The
# training loop multiplies these values by batch_size to approximate a
# sample count, which over-counts whenever the last batch is partial
# (dataset size not divisible by batch_size).
dataset_sizes = {x: len(dataloaders[x]) for x in ['train', 'val']}
# Model
print('Loading model...')
model = models.resnet18(pretrained=False)
# Replace the classification head with one sized for this dataset.
# BUG FIX: the original code did `model.fc.out_features = len(dataset.classes)`,
# which only mutates an integer attribute — the existing 1000x512 weight
# matrix is untouched, so the model keeps producing 1000 logits. A fresh
# Linear layer must be assigned instead.
model.fc = nn.Linear(model.fc.in_features, len(dataset.classes))
# Define learning: cross-entropy over class logits, SGD with momentum, and
# a cyclic LR schedule oscillating between base_lr and max_lr. The optimizer
# is built *after* replacing model.fc so the new head's parameters are
# included in model.parameters().
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9)
scheduler = lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size_up)
# Load model to GPU (falls back to CPU when CUDA is unavailable).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
best_acc = 0.0
print('Start training. Experiment: {}'.format(experiment))
for epoch in range(num_epochs):
    print('Epoch {} / {}'.format(epoch, num_epochs - 1))
    for phase in ['train', 'val']:
        # Toggle dropout/batch-norm behavior per phase.
        if phase == 'train':
            model.train()
        else:
            model.eval()
        running_loss = 0.0
        running_corrects = 0.0
        seen = 0  # exact number of samples processed this phase
        for inputs, labels in tqdm(dataloaders[phase]):
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            # Gradients are only needed during the training phase.
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
            # loss.item() is the batch *mean* -> weight by batch size so the
            # epoch average is exact even with a partial final batch.
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels).item()
            seen += inputs.size(0)
        if phase == 'train':
            # NOTE(review): CyclicLR is conventionally stepped once per
            # batch; here it is stepped once per epoch, so step_size_up
            # counts epochs. Confirm this schedule is intentional.
            scheduler.step()
            # Log the current learning rate (single param group here).
            writer.add_scalar('Learning Rate', optimizer.param_groups[0]['lr'], epoch)
        # FIX: average over the samples actually seen. The original divided
        # by len(dataloader) * batch_size (batch count x batch size), which
        # over-counts when the last batch is partial and skews both loss
        # and accuracy downward.
        epoch_loss = running_loss / seen
        epoch_acc = running_corrects / seen
        print('{} Loss: {:.4f} Acc: {:.4f}'.format(
            phase, epoch_loss, epoch_acc))
        writer.add_scalar('Loss/{}'.format(phase), epoch_loss, epoch)
        writer.add_scalar('Accuracy/{}'.format(phase), epoch_acc, epoch)
        # Checkpoint whenever validation accuracy improves.
        if phase == 'val' and epoch_acc > best_acc:
            best_acc = epoch_acc
            torch.save(model.state_dict(), '/classifier/data/models/' + experiment)
    print()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment