@reddragon
Created April 22, 2017 05:12

import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
from torch.autograd import Variable
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 convolutions with a single channel in and out (MNIST is grayscale).
        self.conv1 = nn.Conv2d(1, 1, 5)
        self.conv2 = nn.Conv2d(1, 1, 5)
        self.pool = nn.MaxPool2d(2, 2)
        # 28x28 input -> 24x24 after conv1 -> 20x20 after conv2 -> 10x10 after the 2x2 pool.
        self.fc1 = nn.Linear(1 * 10 * 10, 100)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1 * 10 * 10)
        x = F.relu(self.fc1(x))
        # No ReLU on the output layer: it would clip negative scores before the softmax.
        x = self.fc2(x)
        return F.log_softmax(x)
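
# Optional sanity check (illustrative, not from the original gist): push one dummy
# 28x28 image through an untrained Net to confirm the 1 * 10 * 10 flattened size
# expected by fc1. `check_net` is an illustrative name; the shape assumes MNIST's
# 1x28x28 inputs.
check_net = Net()
print(check_net(Variable(torch.randn(1, 1, 28, 28))).size())  # expect (1, 10)
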
def unnormalize(img):
    img = img / 2 + 0.5
    return img

def imshow(img):
    img = img / 2 + 0.5  # undo the Normalize transform: map [-1, 1] back to [0, 1]
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
# Re-map each [0, 1] pixel to the [-1, 1] range so the data is roughly zero-mean.
# MNIST images have a single channel, so Normalize takes one mean and one std.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))])
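
# Quick check (illustrative, not from the original gist) that the Normalize above
# maps the pixel extremes 0.0 and 1.0 to -1.0 and 1.0 via (x - 0.5) / 0.5.
print(transforms.Normalize((0.5,), (0.5,))(torch.Tensor([[[0.0, 1.0]]])))  # -1.0 and 1.0
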
# Getting the data
trainset = torchvision.datasets.MNIST(root='./data', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=50,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.MNIST(root='./data', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=50,
                                         shuffle=False, num_workers=2)
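
# Optional visualization (illustrative, not from the original gist): display one
# training batch with the imshow helper above. `sample_images` / `sample_labels` are
# illustrative names, and plt.show() assumes an interactive matplotlib backend.
from torchvision.utils import make_grid
sample_images, sample_labels = next(iter(trainloader))
imshow(make_grid(sample_images))  # make_grid tiles the batch and expands it to 3 channels
plt.show()
print(sample_labels[:10])  # ground-truth digits for the first ten images in the grid
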
net = Net()

# forward() returns log-probabilities (log_softmax), so pair it with NLLLoss;
# CrossEntropyLoss applies its own log-softmax and expects raw scores instead.
criterion = nn.NLLLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.5)
# zero the parameter gradients
optimizer.zero_grad()
for epoch in range(20):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        # wrap them in Variable
        inputs, labels = Variable(inputs), Variable(labels)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        # (loss.data[0] reads the scalar loss on PyTorch 0.x; on 0.4+ this is loss.item())
        running_loss += loss.data[0]
        if i % 100 == 99:  # print every 100 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
print('Finished Training')
# Evaluate accuracy on the held-out test set.
correct = 0
total = 0
for data in testloader:
    images, labels = data
    outputs = net(Variable(images))
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()
print('Accuracy of the network on the %d test images: %f %%' % (
    total, 100.0 * correct / total))
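
# Optional inspection (illustrative, not from the original gist): compare predictions
# and ground truth for one test batch. `test_images` / `test_labels` are illustrative names.
test_images, test_labels = next(iter(testloader))
_, test_predicted = torch.max(net(Variable(test_images)).data, 1)
print('Predicted:   ', test_predicted[:10].view(-1))
print('Ground truth:', test_labels[:10])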