Created
November 15, 2021 18:19
-
-
Save hydramst/59718581b17fa6379bd39dd732ac3a4e to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Step 1: Import PyTorch and Opacus
import torch
from torchvision import datasets, transforms
import numpy as np
from opacus import PrivacyEngine
from tqdm import tqdm

# Step 2: Load MNIST (normalized with the standard MNIST mean/std)
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(
        '../mnist', train=True, download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]),
    ),
    batch_size=64, shuffle=True, num_workers=1, pin_memory=True,
)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(
        '../mnist', train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]),
    ),
    batch_size=1024, shuffle=True, num_workers=1, pin_memory=True,
)

# Step 3: Build the PyTorch network (a small CNN classifier) and optimizer
model = torch.nn.Sequential(
    torch.nn.Conv2d(1, 16, 8, 2, padding=3), torch.nn.ReLU(), torch.nn.MaxPool2d(2, 1),
    torch.nn.Conv2d(16, 32, 4, 2), torch.nn.ReLU(), torch.nn.MaxPool2d(2, 1),
    torch.nn.Flatten(),
    torch.nn.Linear(32 * 4 * 4, 32), torch.nn.ReLU(), torch.nn.Linear(32, 10),
)
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)

# Step 4: Attach a Differential Privacy engine.
# BUGFIX: since Opacus 1.0 the PrivacyEngine constructor takes no model and no
# batch_size / sample_size / alphas / noise_multiplier / max_grad_norm keywords
# (passing them raises "TypeError: __init__() got an unexpected keyword
# argument 'batch_size'"), and attach() no longer exists.  The 1.x API is
# make_private(), which returns DP-wrapped versions of the model, optimizer
# and data loader; batch/sample sizes are inferred from the data loader.
privacy_engine = PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private(
    module=model,
    optimizer=optimizer,
    data_loader=train_loader,
    noise_multiplier=1.3,
    max_grad_norm=1.0,
)

# Step 5: Train the model for 10 epochs
def train(model, train_loader, optimizer, epoch, device, delta):
    """Run one epoch of DP-SGD training and print mean loss and (epsilon, delta).

    Args:
        model: the DP-wrapped network returned by make_private().
        train_loader: the DP-wrapped training data loader.
        optimizer: the DP-wrapped optimizer (clips and noises per-sample grads).
        epoch: epoch number, used only for logging.
        device: torch device string the batches are moved to, e.g. "cpu".
        delta: target delta at which the spent epsilon is reported.
    """
    model.train()
    criterion = torch.nn.CrossEntropyLoss()
    losses = []
    for _batch_idx, (data, target) in enumerate(tqdm(train_loader)):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    # In Opacus >= 1.0 the spent budget lives on the engine's accountant, not
    # on the optimizer; the RDP accountant returns (epsilon, best_alpha).
    epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(delta=delta)
    print(
        f"Train Epoch: {epoch} \t"
        f"Loss: {np.mean(losses):.6f} "
        f"(ε = {epsilon:.2f}, δ = {delta}) for α = {best_alpha}")

for epoch in range(1, 11):
    train(model, train_loader, optimizer, epoch, device="cpu", delta=1e-5)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Возникает ошибка
TypeError: __init__() got an unexpected keyword argument 'batch_size'
на данной конструкции
privacy_engine = PrivacyEngine(model, batch_size=64, sample_size=60000, alphas=range(2,32), noise_multiplier=1.3, max_grad_norm=1.0,)
Не могли бы вы подсказать, с чем это связано?
Заранее спасибо