import torch
from torch.autograd import Variable

def train(num_epochs):
    best_acc = 0.0
    for epoch in range(num_epochs):
        model.train()
        train_acc = 0.0
        train_loss = 0.0
        for i, (images, labels) in enumerate(train_loader):
            # Move images and labels to the GPU if available
            if cuda_avail:
                images = Variable(images.cuda())
                labels = Variable(labels.cuda())
            # Clear all accumulated gradients
            optimizer.zero_grad()
            # Predict classes using images from the training set
            outputs = model(images)
            # Compute the loss based on the predictions and actual labels
            loss = loss_fn(outputs, labels)
            # Backpropagate the loss
            loss.backward()
            # Adjust parameters according to the computed gradients
            optimizer.step()
            train_loss += loss.cpu().item() * images.size(0)
            _, prediction = torch.max(outputs.data, 1)
            train_acc += torch.sum(prediction == labels.data).item()
        # Call the learning rate adjustment function
        adjust_learning_rate(epoch)
        # Compute the average acc and loss over all 50000 training images
        train_acc = train_acc / 50000
        train_loss = train_loss / 50000
        # Evaluate on the test set
        test_acc = test()
        # Save the model if the test acc is greater than our current best
        if test_acc > best_acc:
            save_models(epoch)
            best_acc = test_acc
        # Print the metrics
        print("Epoch {}, Train Accuracy: {}, Train Loss: {}, Test Accuracy: {}".format(
            epoch, train_acc, train_loss, test_acc))