Last active
May 18, 2020 20:46
-
-
Save dvgodoy/01010718f1fbf9a2ad4946740711b266 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Training loop: one training pass and one validation pass per epoch.
# Assumes model, loss_fn, optimizer, device, n_epochs, train_loader,
# val_loader and make_train_step are defined earlier in the file.
losses = []      # per-batch training losses (values returned by train_step)
val_losses = []  # per-batch validation losses (Python floats via .item())

# Build the closure performing a single optimization step, once, up front.
train_step = make_train_step(model, loss_fn, optimizer)

for epoch in range(n_epochs):
    # --- training pass ---
    for x_batch, y_batch in train_loader:
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)
        loss = train_step(x_batch, y_batch)
        losses.append(loss)

    # --- validation pass ---
    # no_grad: skip autograd bookkeeping while evaluating.
    with torch.no_grad():
        # FIX: eval() only needs to run once per validation pass, not on
        # every mini-batch — hoisted out of the inner loop.
        model.eval()
        for x_val, y_val in val_loader:
            x_val = x_val.to(device)
            y_val = y_val.to(device)
            yhat = model(x_val)
            # NOTE(review): PyTorch losses conventionally take
            # (prediction, target); loss_fn(y_val, yhat) is order-swapped.
            # Harmless for symmetric losses such as MSE — confirm otherwise.
            val_loss = loss_fn(y_val, yhat)
            val_losses.append(val_loss.item())

# Show the learned parameters after training completes.
print(model.state_dict())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
A few suggested improvements: inside the `for x_val, y_val in val_loader` loop, `model.eval()` is called on every mini-batch — why re-run it in the loop? It only needs to be called once, before iterating over the validation set.