Template for image classification with PyTorch on MNIST
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from tqdm import tqdm
# Set device (GPU or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
input_size = 28 * 28 # MNIST images are 28x28 pixels
hidden_size = 128
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 5
# Load MNIST dataset
train_dataset = MNIST(root='./data', train=True, transform=ToTensor(), download=True)
test_dataset = MNIST(root='./data', train=False, transform=ToTensor())
# Create data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
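# Optional sanity check (not in the original template): one batch should have
# shape [batch_size, 1, 28, 28] for images and [batch_size] for labels.
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape, sample_labels.shape)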
# Define the MLP model
class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # Flatten the input images to [batch, 784]
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
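# For reference, an equivalent compact definition (not used below) could be:
# nn.Sequential(nn.Flatten(), nn.Linear(input_size, hidden_size),
#               nn.ReLU(), nn.Linear(hidden_size, num_classes))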
# Initialize the model
model = MLP(input_size, hidden_size, num_classes).to(device)
print("{:,} parameters".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
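# Optional: a learning-rate scheduler could be plugged in here, e.g.
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
# with scheduler.step() called once per epoch.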
# Training loop
for epoch in range(num_epochs):
    for images, labels in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{num_epochs}", unit="batch"):
        images, labels = images.to(device), labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
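    # Optional: report the loss of the final batch at the end of each epoch.
    print(f"Epoch {epoch + 1}: last batch loss {loss.item():.4f}")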
# Evaluation
model.eval()
with torch.no_grad():
    correct = 0
    for images, labels in tqdm(test_loader, desc="Evaluating", unit="batch"):
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)  # Index of the highest logit per sample
        correct += (predicted == labels).sum().item()
accuracy = correct / len(test_dataset)
print(f"Accuracy on the test set: {accuracy * 100:.2f}%")