An example of PyTorch on the MNIST dataset
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
## load mnist dataset
use_cuda = torch.cuda.is_available()

root = './data'
if not os.path.exists(root):
    os.mkdir(root)

trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
# if not exist, download mnist dataset
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)

batch_size = 100

train_loader = torch.utils.data.DataLoader(
                 dataset=train_set,
                 batch_size=batch_size,
                 shuffle=True)
test_loader = torch.utils.data.DataLoader(
                dataset=test_set,
                batch_size=batch_size,
                shuffle=False)

print '==>>> total training batch number: {}'.format(len(train_loader))
print '==>>> total testing batch number: {}'.format(len(test_loader))
## network
class MLPNet(nn.Module):
    def __init__(self):
        super(MLPNet, self).__init__()
        self.fc1 = nn.Linear(28*28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        x = x.view(-1, 28*28)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def name(self):
        return "MLP"


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def name(self):
        return "LeNet"
## training
model = LeNet()
if use_cuda:
    model = model.cuda()

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = nn.CrossEntropyLoss()

for epoch in xrange(10):
    # training
    ave_loss = 0
    for batch_idx, (x, target) in enumerate(train_loader):
        optimizer.zero_grad()
        if use_cuda:
            x, target = x.cuda(), target.cuda()
        x, target = Variable(x), Variable(target)
        out = model(x)
        loss = criterion(out, target)
        ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1
        loss.backward()
        optimizer.step()
        if (batch_idx+1) % 100 == 0 or (batch_idx+1) == len(train_loader):
            print '==>>> epoch: {}, batch index: {}, train loss: {:.6f}'.format(
                epoch, batch_idx+1, ave_loss)
    # testing
    correct_cnt, ave_loss = 0, 0
    total_cnt = 0
    for batch_idx, (x, target) in enumerate(test_loader):
        if use_cuda:
            x, target = x.cuda(), target.cuda()
        x, target = Variable(x, volatile=True), Variable(target, volatile=True)
        out = model(x)
        loss = criterion(out, target)
        _, pred_label = torch.max(out.data, 1)
        total_cnt += x.data.size()[0]
        correct_cnt += (pred_label == target.data).sum()
        # smooth average
        ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1
        if (batch_idx+1) % 100 == 0 or (batch_idx+1) == len(test_loader):
            print '==>>> epoch: {}, batch index: {}, test loss: {:.6f}, acc: {:.3f}'.format(
                epoch, batch_idx+1, ave_loss, correct_cnt * 1.0 / total_cnt)

torch.save(model.state_dict(), model.name())
@samuela commented Oct 17, 2017

        x = self.fc1(x)
        x = self.fc2(x)

^^^ By the way, this is redundant since the composition of two linear operators is just another linear operator.
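
For reference, a small sketch (not from the gist; the layer sizes here are only illustrative) of why two stacked nn.Linear layers with no activation in between collapse into a single linear layer:

fc1 = nn.Linear(28*28, 500)
fc2 = nn.Linear(500, 10)
# composite of fc2(fc1(x)) = W2 (W1 x + b1) + b2 = (W2 W1) x + (W2 b1 + b2)
merged = nn.Linear(28*28, 10)
merged.weight.data.copy_(fc2.weight.data.mm(fc1.weight.data))
merged.bias.data.copy_(fc2.weight.data.mv(fc1.bias.data) + fc2.bias.data)
# fc2(fc1(x)) and merged(x) now agree up to floating point error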

@feiaa commented Jan 27, 2018

When I run your code, the results I get are:
MLP: accuracy around 0.86
LeNet: accuracy around 0.64
Do you get the same results when you run it yourself? Normally, even a single fully connected layer with softmax reaches an accuracy above 0.90.
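
For reference, the single-layer baseline being referred to would look roughly like this (SoftmaxNet is a hypothetical name; the softmax itself is applied implicitly inside nn.CrossEntropyLoss):

class SoftmaxNet(nn.Module):
    def __init__(self):
        super(SoftmaxNet, self).__init__()
        self.fc = nn.Linear(28*28, 10)

    def forward(self, x):
        return self.fc(x.view(-1, 28*28))

    def name(self):
        return "Softmax"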

@xmfbit (Owner, Author) commented Jan 27, 2018

@feiaa Hi~ the code at that time may indeed have had problems. It was written rather hastily and I did not pay attention to the accuracy. I have revised the code.
My test results are now as follows:
MLP: 0.977
LeNet: 0.990

Thank you for the feedback!

@brando90 commented Mar 12, 2018

@xmfbit after how many iterations are those numbers? Are they accuracy or loss?

@liyougeng commented Mar 29, 2018

@xmfbit a typo at Line#101
x, targe = x.cuda(), target.cuda()
->->->
x, target = x.cuda(), target.cuda()

@EthanTang0115 commented Apr 13, 2018

Just a tip: should it be 'criterion' instead of 'ceriation'?

@Ankur-Deka commented Jun 12, 2018

@samuela, it's not redundant because there is a ReLU activation in between: x = F.relu(self.fc1(x))

@mattcleigh commented Jun 28, 2018

Is it necessary to wrap your training data in Variable? We do not need autograd active on these objects, correct?

Edit: Never mind, the Variable wrapper is no longer needed after 0.4.0:
https://pytorch.org/2018/04/22/0_4_0-migration-guide.html
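
For context, a minimal sketch of the post-0.4 style described in that migration guide (assuming PyTorch >= 0.4): tensors are used directly, evaluation runs under torch.no_grad(), and loss.item() replaces loss.data[0]:

for x, target in train_loader:
    if use_cuda:
        x, target = x.cuda(), target.cuda()
    optimizer.zero_grad()
    loss = criterion(model(x), target)   # use loss.item() for the running average
    loss.backward()
    optimizer.step()

model.eval()
with torch.no_grad():                    # replaces volatile=True
    for x, target in test_loader:
        if use_cuda:
            x, target = x.cuda(), target.cuda()
        out = model(x)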

@zinwalin commented Jul 19, 2018

How can I test on real handwritten digits that are 28x28 greyscale images?
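
One possible approach (a sketch only; 'digit.png' is a hypothetical file) is to apply the same transform used for training and run a single image through the trained model:

from PIL import Image

img = Image.open('digit.png').convert('L')   # 28x28 greyscale, light digit on dark background like MNIST
x = trans(img).unsqueeze(0)                  # same Normalize as training, add batch dimension
if use_cuda:
    x = x.cuda()
out = model(Variable(x, volatile=True))
_, pred = torch.max(out.data, 1)
print '==>>> predicted digit: {}'.format(pred[0])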

@frgfm commented Sep 2, 2018

@zinwalin initially, LeNet5 was meant for images of shape (32, 32).
To get the same spatial dimensions after C1 (28, 28), you need to change conv1. Not sure how this implementation performs, as it seems considerably larger in model capacity.

On line 55 you can see that self.conv1 is
nn.Conv2d(in_channels=1, out_channels=20, kernel_size=(5, 5), stride=1, padding=0)
Since the kernel is 5x5 with stride 1 and no padding, the output shape equals (input_shape - 4) on both spatial dimensions.

To preserve the spatial shape of (28, 28), you need to use padding=2:
nn.Conv2d(in_channels=1, out_channels=20, kernel_size=(5, 5), stride=1, padding=2)
In short:
self.conv1 = nn.Conv2d(1, 20, 5, 1, 2)

That being said, you will have to edit the fully connected layers accordingly to be compatible with this change (see the sketch after the layer listings below).
The implementation here does not look exactly like the original LeNet5:
The one above
[1@28x28] Input
[20@24x24] CONV1 (5x5), stride 1, pad 0
[20@12x12] POOL1 (2x2) stride 2
[50@8x8] CONV2 (5x5), stride 1, pad 0
[50@4x4] POOL2 (2x2) stride 2
[800] FC
[500] FC
[10] Softmax

Suggested edit (closer to original one in terms of feature map size)
[1@28x28] Input
[20@28x28] CONV1 (5x5), stride 1, pad 2
[20@14x14] POOL1 (2x2) stride 2
[50@10x10] CONV2 (5x5), stride 1, pad 0
[50@5x5] POOL2 (2x2) stride 2
[1250] FC
[500] FC
[10] Softmax

Original one
[1@32x32] Input
[6@28x28] CONV1 (5x5), stride 1, pad 0
[6@14x14] POOL1 (2x2) stride 2
[16@10x10] CONV2 (5x5), stride 1, pad 0
[16@5x5] POOL2 (2x2) stride 2
[120] FC
[84] FC
[10] Softmax

In terms of extracted features, the important figure is the shape after the convolution blocks. So here, the feature maps' size is 50@4x4, compared to 16@5x5 in the original one (half the size if you flatten it, but larger individual feature maps).
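
Putting the suggested edit together, a sketch of the padded variant (LeNetPadded is a hypothetical name; the first fully connected layer becomes 5*5*50 after the two pooling stages):

class LeNetPadded(nn.Module):
    def __init__(self):
        super(LeNetPadded, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1, 2)   # pad 2 keeps 28x28
        self.conv2 = nn.Conv2d(20, 50, 5, 1)     # 14x14 -> 10x10
        self.fc1 = nn.Linear(5*5*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)   # 20@14x14
        x = F.max_pool2d(F.relu(self.conv2(x)), 2, 2)   # 50@5x5
        x = x.view(-1, 5*5*50)
        x = F.relu(self.fc1(x))
        return self.fc2(x)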

@voldikss commented Dec 20, 2018

A question: why are the labels not one-hot encoded? I noticed that the official PyTorch examples do not one-hot encode them either. Why is that?

@zhanwenchen commented Feb 26, 2019

Could you please port this to Python 3?
