@gorarakelyan
Created February 16, 2023 18:00
Handwritten digit recognition with PyTorch, Hugging Face Datasets and Aim.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets import load_dataset
from aim import Run
from aim.hf_dataset import HFDataset
# from aim.pytorch import track_gradients_dists, track_params_dists
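# The helpers above, if uncommented, can log parameter and gradient distributions to Aim.
# A minimal sketch of how they would be wired in (assuming the (model, run) call
# signature from the Aim docs), typically right after loss.backward() in the training loop:
#   track_params_dists(model, aim_run)
#   track_gradients_dists(model, aim_run)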
# Defining hyper-parameters
num_epochs = 2
num_classes = 10
batch_size = 16
learning_rate = 0.01
# Loading the dataset
version = '1.1.0'
revision = 'da9a9d63961462871324d514ca8cdca1e5624c5c'
dataset = load_dataset("gorar/A-MNIST", version=version, revision=revision)
# Defining train and test splits
train_dataset = dataset['train'].with_format('torch')
test_dataset = dataset['test'].with_format('torch')
# Initializing data loaders for train and test split
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
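# Each loader yields a dict of tensors. Based on the reshape and cast used below, a
# batch is assumed to look roughly like this (a sketch, not verified against the dataset):
#   batch = next(iter(train_loader))
#   batch['image']  # shape [16, 28, 28], integer pixel intensities
#   batch['label']  # shape [16], class indices 0-9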
# Device configuration
device = torch.device('cpu')
# Initializing an Aim run
aim_run = Run()
# Track hyper-parameters
aim_run['hparams'] = {
    'num_epochs': num_epochs,
    'num_classes': num_classes,
    'batch_size': batch_size,
    'learning_rate': learning_rate,
}
# Tracking dataset metadata
aim_run['dataset'] = HFDataset(dataset)
aim_run['dataset', 'revision'] = revision
# Building a CNN (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(7 * 7 * 32, num_classes)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out
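# Note: two stride-2 max-pool layers downsample the 28x28 input to 7x7, and layer2
# outputs 32 feature maps, which gives the 7 * 7 * 32 input size of the fc layer.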
model = ConvNet(num_classes).to(device)
# Initializing loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Training the model
total_step = len(train_loader)
print('Total steps per epoch: {}'.format(total_step))
for epoch in range(num_epochs):
    for i, samples in enumerate(train_loader):
        # Cast images to float and add the channel dimension expected by Conv2d;
        # -1 keeps the reshape valid even if the last batch is smaller than batch_size
        images = samples['image'].to(torch.float32).reshape(-1, 1, 28, 28)
        labels = samples['label']
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 30 == 0:
            print('Epoch [{}/{}], Step [{}/{}], '
                  'Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1,
                                        total_step, loss.item()))

            # Computing accuracy on the current batch
            correct = 0
            total = 0
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            acc = 100 * correct / total

            # Tracking model train metrics
            aim_run.track(acc, name='accuracy', epoch=epoch, context={'subset': 'train'})
            aim_run.track(loss.item(), name='loss', epoch=epoch, context={'subset': 'train'})
# Testing the model
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for samples in test_loader:
        images = samples['image'].to(torch.float32).reshape(-1, 1, 28, 28)
        labels = samples['label']
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    # Tracking test metrics
    aim_run.track(100 * correct / total, name='accuracy', context={'subset': 'test'})
    print('Test Accuracy: {} %'.format(100 * correct / total))
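# After a run completes, the tracked hyper-parameters, dataset metadata and metrics can be
# explored in the Aim UI; a usage note (assuming the default local .aim repository created
# by Run()): run `aim up` from this directory and open the printed URL.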