import matplotlib.pyplot as plt
import seaborn

def plot(*loss_history):
    # One figure per metric key, concatenating the histories passed in.
    keys = loss_history[0].keys()
    for k in keys:
        plt.figure()
        data = []
        for l in loss_history:
            data.extend(l[k])
        seaborn.lineplot(x=range(len(data)), y=data).set_title(k)
# Visualise the curves; loss_history and val_metrics_history are produced by
# fit() below (the gist's cells are not in run order).
plot(loss_history)
plot(val_metrics_history)
from tqdm.auto import tqdm

def fit(model, epochs, train_loader, val_loader, loss_func, optimizer, lr_scheduler, val_metrics):
    loss_history = {'train': [], 'val': []}
    val_metrics_history = {k: [] for k in val_metrics}
    for epoch in tqdm(range(epochs)):
        model.train()
        loss_history_for_batch = []
        val_metrics_for_batch = {k: [] for k in val_metrics}
        # ... the rest of the epoch body is truncated in the original gist ...
    return loss_history, val_metrics_history
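# A minimal sketch of the epoch body that the gist truncates -- an illustration
# under stated assumptions, not the author's original code. It assumes
# loss_func(outputs, targets) returns a scalar tensor and val_metrics maps a
# name to a callable metric(outputs, targets) -> float.
import torch

def run_epoch_sketch(model, train_loader, val_loader, loss_func, optimizer, val_metrics, device):
    model.train()
    train_losses = []
    for xb, yb in train_loader:
        xb, yb = xb.to(device), yb.to(device)
        optimizer.zero_grad()
        loss = loss_func(model(xb), yb)
        loss.backward()
        optimizer.step()
        train_losses.append(loss.item())
    model.eval()
    val_losses = []
    metric_values = {k: [] for k in val_metrics}
    with torch.no_grad():  # no gradients needed for validation
        for xb, yb in val_loader:
            xb, yb = xb.to(device), yb.to(device)
            out = model(xb)
            val_losses.append(loss_func(out, yb).item())
            for k, fn in val_metrics.items():
                metric_values[k].append(fn(out, yb))
    return train_losses, val_losses, metric_values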
import torch
from torch import nn
from torchvision import models

# Load an ImageNet-pretrained ResNet-18 and replace its head with a 2-class output.
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 2)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use CUDA if available
print('device', device)
model.to(device)
model
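# Optional sanity check (not in the original gist): a dummy forward pass
# confirms the new head yields one logit per class.
with torch.no_grad():
    print(model(torch.randn(1, 3, 224, 224, device=device)).shape)  # torch.Size([1, 2])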
# Standalone StepLR demo on a throw-away linear model (note: this rebinds
# `model`; re-run the ResNet-18 cell before training). The learning rate is
# multiplied by gamma=0.1 every step_size=2 scheduler steps.
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)
lrs = []
for epoch in range(10):
    optimizer.step()
    lrs.append(optimizer.param_groups[0]["lr"])
    scheduler.step()
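# Visualise the schedule recorded above (plotting helper added here; not in the
# original gist). With lr=0.001, step_size=2, gamma=0.1 the recorded values are
# [0.001, 0.001, 1e-4, 1e-4, 1e-5, 1e-5, 1e-6, 1e-6, 1e-7, 1e-7].
plt.figure()
plt.plot(range(10), lrs)
plt.xlabel('epoch'); plt.ylabel('learning rate')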
from torch.utils.data import DataLoader
from torchvision.utils import make_grid

# train_ds / val_ds are assumed to be built in cells the gist omits.
batch_size = 64
train_loader = DataLoader(train_ds, batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size)

# Quick look at one sample and its value range.
img, label = train_ds[0]
plt.imshow(img.permute(1, 2, 0))
torch.min(train_ds[0][0]), torch.max(train_ds[0][0])

denorm = lambda x: ((x + 1) / 2).clamp(0, 1)  # map [-1, 1] back to [0, 1] for display

def show_batch(loader, do_denorm=False):
    # Render the first batch from `loader` as an image grid, 8 per row.
    fig, ax = plt.subplots(figsize=(24, 12))
    inputs, classes = next(iter(loader))
    if do_denorm:
        inputs = denorm(inputs)
    out = make_grid(inputs, nrow=8).permute(1, 2, 0)
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(out)

show_batch(train_loader, True)
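# Hedged end-to-end wiring example -- the accuracy helper and hyper-parameters
# are illustrative choices, not taken from the original gist. It assumes `model`
# is the ResNet-18 classifier from above (re-run that cell after the StepLR
# demo, which rebinds `model`).
def accuracy(outputs, targets):
    # fraction of correct argmax predictions in a batch
    return (outputs.argmax(dim=1) == targets).float().mean().item()

loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)
loss_history, val_metrics_history = fit(model, 10, train_loader, val_loader,
                                        loss_func, optimizer, lr_scheduler,
                                        {'accuracy': accuracy})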