@reesepathak
Last active June 5, 2019 16:12
Convolutional autoencoder for MNIST
import argparse
import pdb

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import Conv2d, ConvTranspose2d
from torchvision import datasets, transforms
from torchvision.utils import save_image
class MNIST_CNN(nn.Module):
    def __init__(self):
        super(MNIST_CNN, self).__init__()
        # Encoder convs preserve spatial size (3x3 kernels, padding 1);
        # the 2x2 max-pools in encode() do the downsampling.
        self.enc_conv1 = Conv2d(1, 16, 3, 1, padding=1)   # 1 -> 16 channels
        self.enc_conv2 = Conv2d(16, 8, 3, 1, padding=1)   # 16 -> 8 channels
        self.enc_conv3 = Conv2d(8, 8, 3, 1, padding=1)    # 8 -> 8 channels
        # Decoder transposed convs undo the pooling: 3x3 -> 7x7 -> 14x14 -> 28x28.
        self.dec_conv1 = ConvTranspose2d(8, 8, 5, 2, padding=1)
        self.dec_conv2 = ConvTranspose2d(8, 8, 2, 2, padding=0)
        self.dec_conv3 = ConvTranspose2d(8, 16, 2, 2, padding=0)
        self.logits = Conv2d(16, 1, 3, 1, padding=1)      # per-pixel logits

    def encode(self, x):
        x = F.max_pool2d(F.relu(self.enc_conv1(x)), 2, 2)
        x = F.max_pool2d(F.relu(self.enc_conv2(x)), 2, 2)
        return F.max_pool2d(F.relu(self.enc_conv3(x)), 2, 2)

    def decode(self, x):
        x = F.relu(self.dec_conv1(x))
        x = F.relu(self.dec_conv2(x))
        return F.relu(self.dec_conv3(x))

    def forward(self, x):
        x = self.encode(x)
        x = self.decode(x)
        return torch.sigmoid(self.logits(x))
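# Shape walkthrough (comments added for clarity; the sizes follow from the
# kernel/stride/padding choices above):
#   encode: 1x28x28 -> 16x14x14 -> 8x7x7 -> 8x3x3     (conv + ReLU + 2x2 max-pool)
#   decode: 8x3x3 -> 8x7x7 -> 8x14x14 -> 16x28x28     (ConvTranspose2d + ReLU)
#   logits: 16x28x28 -> 1x28x28, squashed into [0, 1] by the final sigmoid
# Quick REPL check: MNIST_CNN()(torch.zeros(1, 1, 28, 28)).shape == (1, 1, 28, 28)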
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, _) in enumerate(train_loader):
        # ToTensor already yields float32, so no extra dtype juggling is needed.
        data = data.to(device)
        optimizer.zero_grad()
        recon_data = model(data)
        if args.trace and batch_idx > 0:
            pdb.set_trace()
        # Sum-reduced MSE over the batch; divided by the batch size when logged.
        loss = F.mse_loss(recon_data, data, reduction='sum')
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item() / len(data)))
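# save_image is imported above but never used in the gist; this is a minimal
# visualization sketch (an addition, not the author's code) that writes one
# grid with inputs on the top row and reconstructions on the bottom row.
# The helper name and the default output path are made up for illustration.
def save_reconstructions(model, device, data_loader, out_path='recon.png'):
    model.eval()
    with torch.no_grad():
        data, _ = next(iter(data_loader))
        data = data.to(device)[:8]
        save_image(torch.cat([data, model(data)]), out_path, nrow=8)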
def main():
    parser = argparse.ArgumentParser(description="Convolutional Autoencoder for MNIST")
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (defaults to 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (defaults to 1000)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (defaults to 200)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--trace', action='store_true', default=False,
                        help='enables a pdb trace during training')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (defaults to 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (defaults to 0.5)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (defaults to 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    print("Using CUDA" if use_cuda else "Not using CUDA")
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./MNIST', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))])),
        batch_size=args.batch_size, shuffle=True, **kwargs)

    model = MNIST_CNN().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)


if __name__ == "__main__":
    main()
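To inspect reconstructions after training, one possible tail for main() (a sketch building on the save_reconstructions helper above; it assumes the same normalization as the training loader and reuses the otherwise-unused --test-batch-size flag):

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./MNIST', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))])),
        batch_size=args.test_batch_size, shuffle=False, **kwargs)
    save_reconstructions(model, device, test_loader)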