@amineHY
Last active April 19, 2019 15:30
This code implements the training of a CNN in Python using PyTorch.
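The snippet below assumes that mnist_train, mnist_test, data_loader, and model are already defined. A minimal sketch of that setup is shown first; the two-stage CNN here is only an illustrative assumption, since the gist does not show the author's actual architecture.

import torch
import torchvision
import torchvision.transforms as transforms

# MNIST datasets (assumed to exist as mnist_train / mnist_test in the snippet below)
mnist_train = torchvision.datasets.MNIST(root='./data', train=True,
                                         transform=transforms.ToTensor(), download=True)
mnist_test = torchvision.datasets.MNIST(root='./data', train=False,
                                        transform=transforms.ToTensor(), download=True)

# Mini-batch loader over the training set (batch size matches the snippet below)
data_loader = torch.utils.data.DataLoader(dataset=mnist_train, batch_size=32, shuffle=True)

# Illustrative CNN (an assumption, not the author's exact model):
# two conv/ReLU/max-pool stages followed by a linear classifier
class CNN(torch.nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 16, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(16, 32, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.fc = torch.nn.Linear(7 * 7 * 32, 10)  # 28x28 -> 14x14 -> 7x7 after two poolings

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten for the linear layer
        return self.fc(out)

model = CNN()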
import torch
from torch.autograd import Variable

print('Training the Deep Learning network ...')

learning_rate = 0.001
criterion = torch.nn.CrossEntropyLoss()  # Softmax is internally computed.
optimizer = torch.optim.Adam(params=model.parameters(), lr=learning_rate)

train_cost = []
train_accu = []

batch_size = 32
training_epochs = 15
total_batch = len(mnist_train) // batch_size

print('Size of the training dataset is {}'.format(mnist_train.data.size()))
print('Size of the testing dataset is {}'.format(mnist_test.data.size()))
print('Batch size is : {}'.format(batch_size))
print('Total number of batches is : {0:2.0f}'.format(total_batch))
print('\nTotal number of epochs is : {0:2.0f}'.format(training_epochs))
def compute_accuracy(Y_target, hypothesis):
    # Predicted class = index of the maximum logit along the class dimension
    Y_prediction = hypothesis.data.max(dim=1)[1]
    accuracy = (Y_prediction == Y_target.data).float().mean()
    return accuracy.item()
for epoch in range(training_epochs):
    avg_cost = 0

    for i, (batch_X, batch_Y) in enumerate(data_loader):
        # Select a minibatch
        X = Variable(batch_X)  # image is already size of (28x28), no reshape
        Y = Variable(batch_Y)  # label is not one-hot encoded

        # Initialization of the gradients
        optimizer.zero_grad()

        # Forward propagation: compute the output
        hypothesis = model(X)

        # Computation of the cost J
        cost = criterion(hypothesis, Y)  # <= compute the loss function

        # Backward propagation
        cost.backward()  # <= compute the gradients

        # Update parameters (weights and biases)
        optimizer.step()

        # Record some performance metrics to monitor the training
        train_accu.append(compute_accuracy(Y, hypothesis))
        train_cost.append(cost.item())

        if i % 200 == 0:
            print("Epoch = {},\t batch = {},\t cost = {:2.4f},\t accuracy = {}".format(
                epoch + 1, i, train_cost[-1], train_accu[-1]))

        avg_cost += cost.item() / total_batch

    print("[Epoch: {:>4}], averaged cost = {:>.9}".format(epoch + 1, avg_cost))

print('Learning Finished!')
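The snippet prints the test-set size at the start but never evaluates on it. A minimal evaluation sketch, reusing compute_accuracy under the same assumptions as the setup above (the targets attribute of the MNIST dataset is assumed; older torchvision releases named it test_labels):

# Evaluate on the full test set without tracking gradients
model.eval()
with torch.no_grad():
    # Raw uint8 images -> float tensors in [0, 1], shaped (N, 1, 28, 28)
    X_test = mnist_test.data.view(-1, 1, 28, 28).float() / 255.0
    Y_test = mnist_test.targets  # assumed attribute name
    test_accuracy = compute_accuracy(Y_test, model(X_test))

print('Test accuracy: {:2.2f} %'.format(test_accuracy * 100))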