import torch
import argparse
import torch.nn as nn
from tqdm import tqdm
from torchvision import datasets, transforms
parser = argparse.ArgumentParser(description='Recurrent Unit Baselines')
parser.add_argument('--batch_size', help='batch size of network', type=int, default=16)
parser.add_argument('--epochs', help='number of epochs', type=int, default=10)
parser.add_argument('--hidden_layer_size', help='size of the hidden layer', type=int, default=100)
parser.add_argument('--gpu', help='use gpu for training', action='store_true')
parser.add_argument('--learning_rate', help='the learning rate', type=float, default=0.01)
parser.add_argument('--gradient_clipping_value', help='max gradient norm for clipping', type=float, default=1.0)
args = parser.parse_args()
def sequential_MNIST(batch_size, gpu=False, dataset_folder='./data'):
    kwargs = {'num_workers': 1, 'pin_memory': True} if gpu else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(dataset_folder, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,)),
                           transforms.Lambda(lambda x: x.view(-1, 1))
                       ])),
        batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(dataset_folder, train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
            transforms.Lambda(lambda x: x.view(-1, 1))
        ])),
        batch_size=batch_size, shuffle=False, **kwargs)
    return train_loader, test_loader
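# Sequential MNIST: each 28x28 digit is flattened into a 784-step sequence
# with a single feature per step, so batches have shape (batch_size, 784, 1)
# and the recurrent model reads the image one pixel at a time.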
training_data, testing_data = sequential_MNIST(args.batch_size, gpu=args.gpu)
class LSTMBaseline(nn.Module):
    def __init__(self):
        super().__init__()
        self.input_layer = nn.LSTM(1, args.hidden_layer_size, batch_first=True)
        self.linear_layer = nn.Linear(args.hidden_layer_size, 10)

    def forward(self, x):
        x, _ = self.input_layer(x)
        x = self.linear_layer(x[:, -1, :])
        return x
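# The baseline classifies from the LSTM output at the final time step only,
# i.e. the hidden state after the full 784-pixel sequence has been consumed.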
device = torch.device('cuda' if args.gpu else 'cpu')
model = LSTMBaseline().to(device)
criterion = nn.CrossEntropyLoss()
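# nn.CrossEntropyLoss applies log-softmax internally, which is why the model
# returns raw logits with no softmax layer of its own.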
def train():
    model.train()
    for current_batch, (data, label) in enumerate(tqdm(training_data)):
        data, label = data.to(device), label.to(device)
        model.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        if (current_batch + 1) % 100 == 0:
            print(f'Current Loss: {loss.item()}')
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clipping_value)
        # Manual SGD update: p <- p - learning_rate * grad
        with torch.no_grad():
            for p in model.parameters():
                p -= args.learning_rate * p.grad
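# The manual update above is plain SGD without momentum. A minimal sketch of
# the same step using a built-in optimizer (constructed once, outside train()):
#   optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
#   ...
#   loss.backward()
#   torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clipping_value)
#   optimizer.step()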
def test():
    model.eval()
    correct = 0
    total = 0
    print('Testing accuracy...')
    with torch.no_grad():
        for data, label in tqdm(testing_data):
            total += label.size(0)
            data, label = data.to(device), label.to(device)
            output = model(data)
            _, predicted = torch.max(output, 1)
            correct += (predicted == label).sum().item()
    print(f'{100 * correct / total:.2f}%')
def main():
    for epoch in range(1, args.epochs + 1):
        print(f'Epoch: {epoch}')
        train()
        test()


if __name__ == '__main__':
    main()
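# Example invocation (the filename is hypothetical; use whatever name you
# saved this gist under):
#   python sequential_mnist_lstm.py --epochs 10 --batch_size 16 --gpu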