@oscarknagg
Last active January 3, 2019 23:28
Linf adversarial training for MNIST
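
Adversarial training hardens a model by fitting it to worst-case perturbed inputs rather than clean ones. Formally (Madry et al., 2018), it solves the saddle-point problem min_θ E_(x,y) [ max_{‖δ‖∞ ≤ ε} L(f_θ(x + δ), y) ], where the inner maximisation is approximated here by projected gradient descent (PGD) within an L∞ ball of radius ε = 0.3.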
from torchvision import transforms, datasets
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.fc1 = nn.Linear(1024, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1024)  # 64 channels * 4 * 4 spatial after two conv/pool stages
        x = self.fc1(x)
        return F.log_softmax(x, dim=1)
train = datasets.MNIST('../data/', train=True,
                       transform=transforms.Compose([transforms.ToTensor()]),
                       download=True)
train_loader = DataLoader(train, batch_size=128, shuffle=True)

model = Net()
model.train()
optimiser = optim.SGD(model.parameters(), lr=0.1)
# The network already outputs log-probabilities (log_softmax), so use
# NLLLoss; CrossEntropyLoss would apply log_softmax a second time
loss_fn = nn.NLLLoss()
for epoch in range(50):
    for x, y in train_loader:
        # Projected gradient descent from an earlier Gist
        # https://gist.github.com/oscarknagg/45b187c236c6262b1c4bbe2d0920ded6
        # (a simplified sketch of this helper is included after the snippet)
        x_adv = projected_gradient_descent(model, x, y, loss_fn,
                                           num_steps=40, step_size=0.01,
                                           eps=0.3, eps_norm='inf',
                                           step_norm='inf')

        # Standard training step, but on the adversarial batch
        optimiser.zero_grad()
        y_pred = model(x_adv)
        loss = loss_fn(y_pred, y)
        loss.backward()
        optimiser.step()
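
For reference, here is a minimal sketch of what the projected_gradient_descent helper could look like, inferred from the call site above. It covers only the eps_norm='inf' / step_norm='inf' case used in this Gist; the clamp=(0, 1) keyword (keeping pixels in the valid image range) is an assumption, and the authoritative implementation is the one in the linked Gist.

import torch

def projected_gradient_descent(model, x, y, loss_fn, num_steps, step_size,
                               eps, eps_norm='inf', step_norm='inf',
                               clamp=(0, 1)):
    # Sketch: only the L-inf threat model used above is handled
    assert eps_norm == 'inf' and step_norm == 'inf'
    x_adv = x.clone().detach()

    for _ in range(num_steps):
        x_adv.requires_grad_(True)
        loss = loss_fn(model(x_adv), y)
        grad = torch.autograd.grad(loss, x_adv)[0]

        with torch.no_grad():
            # Ascend the loss with a signed-gradient step of size step_size...
            x_adv = x_adv + step_size * grad.sign()
            # ...then project back into the L-inf ball of radius eps around x
            x_adv = torch.max(torch.min(x_adv, x + eps), x - eps)
            # clamp (assumed) keeps the perturbed pixels in the valid range
            x_adv = x_adv.clamp(*clamp)

    return x_adv.detach()

With 40 steps of size 0.01 inside an eps = 0.3 ball, these hyperparameters match the PGD adversarial-training setup that Madry et al. (2018) use for MNIST.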