Created
August 24, 2020 08:14
-
-
Save TheBojda/29324b9e69638024d9a6513b38f442a6 to your computer and use it in GitHub Desktop.
PyTorch CIFAR10 neural network example
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim

# Convert PIL images to float tensors in [0, 1]; no normalization is applied.
transform = transforms.Compose([
    transforms.ToTensor()
])

# CIFAR10: 50k train / 10k test 32x32 RGB images across 10 classes,
# downloaded to ./data on first run.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Preview one batch. FIX: the original used `dataiter.next()`, which is not
# the Python 3 iterator protocol and was removed from DataLoader iterators
# in modern PyTorch; the built-in next() is the correct, portable spelling.
dataiter = iter(trainloader)
images, labels = next(dataiter)
print(images.size())

plt.figure(figsize=(10, 10))
for i in range(4):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    # Tensors are (C, H, W); matplotlib's imshow expects (H, W, C).
    plt.imshow(np.transpose(images[i].numpy(), (1, 2, 0)), cmap=plt.cm.binary)
    plt.xlabel(classes[labels[i]])
plt.show()
class Net(nn.Module):
    """LeNet-style CNN for 32x32 RGB CIFAR10 images, producing 10 class logits."""

    def __init__(self):
        super().__init__()
        # Two conv stages, each followed by 2x2 max pooling in forward():
        # (3, 32, 32) -> conv1 -> (6, 28, 28) -> pool -> (6, 14, 14)
        #             -> conv2 -> (16, 10, 10) -> pool -> (16, 5, 5)
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected classifier head over the flattened 16*5*5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw (unsoftmaxed) class logits for a (N, 3, 32, 32) batch."""
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
net = Net()
print(net)

# Sanity check: list and count the learnable parameter tensors.
params = list(net.parameters())
print(len(params))

# Prefer the first CUDA device when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())

LOG_EVERY = 2000  # mini-batches between loss printouts

for epoch in range(2):  # two full passes over the training set
    running_loss = 0.0
    for i, data in enumerate(trainloader):
        # data is a list of [inputs, labels]; move both to the training device.
        inputs = data[0].to(device)
        labels = data[1].to(device)

        # Clear gradients accumulated by the previous step.
        optimizer.zero_grad()

        # Forward pass + loss, then backprop and a parameter update.
        loss = criterion(net(inputs), labels)
        loss.backward()
        optimizer.step()

        # Report the mean loss over the last LOG_EVERY mini-batches.
        running_loss += loss.item()
        if i % LOG_EVERY == LOG_EVERY - 1:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / LOG_EVERY))
            running_loss = 0.0

print('Finished Training')

# Persist only the learned weights (state_dict), not the module object.
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment