Created
November 13, 2020 08:27
-
-
Save devil-cyber/5acc6c09c93ca13c644209c327fc66eb to your computer and use it in GitHub Desktop.
CNN classifier using CIFAR10 dataset with Pytorch
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
import torch.nn as nn | |
import torchvision | |
from torchvision.transforms import transforms | |
from torchvision import datasets | |
from torch.utils.data import DataLoader | |
import torch.nn.functional as F | |
# Hyperparameters and data pipeline setup.
# BUG FIX: torch.cuda.is_available must be *called* — the bare function
# object is always truthy, so the original always selected 'cuda', even
# on machines with no GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
num_epochs = 10
batch_size = 4
learning_rate = 0.001

# Normalize each of the 3 RGB channels with mean=0.5 and std=0.5, which
# maps ToTensor's [0, 1] pixel range to [-1, 1]:
#   min: (0 - 0.5) / 0.5 = -1
#   max: (1 - 0.5) / 0.5 =  1
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
])

# CIFAR-10: 50k train / 10k test 32x32 colour images in 10 classes.
train_dataset = datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Class names in CIFAR-10 label-index order.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Implement Conv Net | |
class ConvNet(nn.Module):
    """LeNet-style CNN for 32x32 RGB images (CIFAR-10, 10 classes).

    Layer shapes for a (N, 3, 32, 32) input:
      conv1 (5x5) -> (N, 6, 28, 28), pool -> (N, 6, 14, 14)
      conv2 (5x5) -> (N, 16, 10, 10), pool -> (N, 16, 5, 5)
      flatten -> (N, 400) -> fc1 -> fc2 -> fc3 -> (N, 10) logits
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)   # shared 2x2 pool, reused after each conv
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (N, 10) for input x of shape (N, 3, 32, 32)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)       # flatten all but the batch dimension
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # BUG FIX: no activation on the output layer. nn.CrossEntropyLoss
        # expects raw logits; the original applied F.relu here, clamping all
        # negative logits to 0 and crippling the loss gradient.
        x = self.fc3(x)
        return x
# Build the model, loss, and optimizer, then train for num_epochs epochs.
# FIX: the original defined `device` but never moved the model or the
# batches onto it, so the GPU (when selected) was never actually used.
model = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

n_total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # images: (batch, 3, 32, 32); move each batch to the training device
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update; clear gradients first so
        # they never accumulate across iterations.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 2000 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}], step [{i+1}/{n_total_step}], Loss: {loss.item():.4f}")

print("Finished training")
PATH = './cnn.pth'
torch.save(model.state_dict(), PATH)
# Evaluate overall and per-class accuracy on the test set.
model.eval()  # switch to inference mode (no-op for this architecture, but conventional)
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0] * 10
    n_class_sample = [0] * 10
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = index of the max logit along the class dimension.
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()
        # FIX 1: iterate over the actual batch length, not `batch_size` —
        # the final batch may be smaller, which would raise an IndexError.
        # FIX 2: count EVERY sample in n_class_sample, not only the
        # correctly classified ones; otherwise each per-class accuracy is
        # trivially 100% (or a ZeroDivisionError for never-correct classes).
        for j in range(labels.size(0)):
            label = labels[j].item()
            n_class_sample[label] += 1
            if predicted[j].item() == label:
                n_class_correct[label] += 1

    acc = 100 * n_correct / n_samples
    print(f"Accuracy of the network: {acc}%")
    for i in range(10):
        class_acc = 100 * n_class_correct[i] / n_class_sample[i]
        print(f"Accuracy of {classes[i]}: {class_acc} %")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment