Skip to content

Instantly share code, notes, and snippets.

@Franklin-Yao
Created July 7, 2020 22:54
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Franklin-Yao/00d9a0d28fd12c939f0da47ee61150d4 to your computer and use it in GitHub Desktop.
Save Franklin-Yao/00d9a0d28fd12c939f0da47ee61150d4 to your computer and use it in GitHub Desktop.
PyTorch 1.7 mixed-precision training
# Mixed-precision (AMP) training loop for PyTorch 1.7.
# Assumes the enclosing scope provides: `model`, `optimizer`, `scheduler`,
# `batches` (iterable of (X, y) tensor pairs), and `self` with `n_epochs`
# and `loss_fn` — TODO confirm against the enclosing class.
#
# NEW: GradScaler applies dynamic loss scaling so fp16 gradients
# don't underflow to zero.
scaler = torch.cuda.amp.GradScaler()
for epoch in range(self.n_epochs):
    for i, (X_batch, y_batch) in enumerate(batches):
        X_batch = X_batch.cuda()
        y_batch = y_batch.cuda()
        optimizer.zero_grad()
        # NEW: forward pass under autocast — eligible ops run in float16.
        # Only the forward/loss computation belongs inside this context;
        # backward() is kept outside per the official AMP recipe.
        with torch.cuda.amp.autocast():
            y_pred = model(X_batch).squeeze()
            loss = self.loss_fn(y_pred, y_batch)
        # NEW: scale the loss before backward so small gradients survive fp16.
        scaler.scale(loss).backward()
        if i % 100 == 0:
            # GPU->CPU sync only when we actually print (was done every
            # batch in the original, stalling the pipeline needlessly).
            lv = loss.detach().cpu().numpy()
            print(f"Epoch {epoch + 1}/{self.n_epochs}; Batch {i}; Loss {lv}")
        # NEW: step() unscales gradients and skips the optimizer step if any
        # are inf/NaN; update() then adjusts the scale factor for next time.
        scaler.step(optimizer)
        scaler.update()
        # NOTE(review): stepping the scheduler per batch, matching the
        # original statement order — if this scheduler is epoch-based
        # (e.g. StepLR), this call should move to the epoch loop; confirm.
        scheduler.step()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment