Skip to content

Instantly share code, notes, and snippets.

@Xiuyu-Li
Created October 20, 2020 08:31
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Xiuyu-Li/cd99c7d75e9b705c599d25b412593fed to your computer and use it in GitHub Desktop.
torch and keras test scripts
import numpy as np
from typing import Tuple
import tensorflow as tf
import torch
import torchvision
import torchvision.transforms as transforms
# Set verbosity.
# Silence TF1-style logging below ERROR so training output stays readable.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def small_cnn(input_shape: Tuple[int],
              num_classes: int,
              num_conv: int) -> tf.keras.models.Sequential:
    """Build a small CNN for image classification.

    Args:
        input_shape: Integer tuple giving the shape of one input image.
        num_classes: Number of prediction classes (output is raw logits).
        num_conv: Number of Conv2D + MaxPooling2D stages to stack.

    Returns:
        An uncompiled Keras Sequential model.
    """
    stages = [tf.keras.layers.Input(shape=input_shape)]
    # Each conv stage: 32 3x3 ReLU filters followed by 2x2 max pooling.
    for _ in range(num_conv):
        stages.append(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
        stages.append(tf.keras.layers.MaxPooling2D())
    stages.extend([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(num_classes),  # logits; no softmax here
    ])
    return tf.keras.models.Sequential(stages)
# Experiment hyperparameters for the Keras/TF run.
dataset = 'cifar10'
num_classes = 10
num_conv = 3        # conv/pool stages in the CNN
epochs = 100
lr = 0.02           # SGD learning rate
momentum = 0.9
batch_size = 250
print('Loading the dataset.')
# Standard CIFAR-10 augmentation plus per-channel normalization (train only).
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Test split gets normalization only — no augmentation.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# batch_size == len(dataset): each loader yields its whole split in a single
# batch so it can be converted to one NumPy array for Keras below.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=len(trainset), shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=len(testset), shuffle=False)
dataloaders = {"train": trainloader, "test": testloader}
# Drain each DataLoader (batch size == full split, see above) to obtain the
# complete train/test splits as NumPy arrays for Keras.
for phase in ['train', 'test']:
    for _, (data, target) in enumerate(dataloaders[phase]):
        if phase == "train":
            x_train = data.detach().numpy()
            y_train_indices = target.detach().numpy()
        else:
            x_test = data.detach().numpy()
            y_test_indices = target.detach().numpy()

# PyTorch delivers NCHW; Keras expects NHWC. The previous axes (0, 3, 2, 1)
# also swapped H and W — shape-compatible only because CIFAR images are
# square, but it transposed every image spatially. (0, 2, 3, 1) is correct.
x_train = np.transpose(x_train, (0, 2, 3, 1))
y_train_indices = y_train_indices.reshape(-1, 1)
x_test = np.transpose(x_test, (0, 2, 3, 1))
y_test_indices = y_test_indices.reshape(-1, 1)
# Convert class index vectors to one-hot matrices.
y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)
# Build, compile, and train the Keras model on the extracted arrays.
input_shape = x_train.shape[1:]
model = small_cnn(input_shape, num_classes, num_conv=num_conv)
# `lr` is deprecated in tf.keras optimizers (and removed in Keras 3);
# the supported keyword is `learning_rate`.
optimizer = tf.keras.optimizers.SGD(learning_rate=lr, momentum=momentum)
# The model outputs raw logits, so the loss must use from_logits=True.
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.summary()
model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test),
    shuffle=True)
print('Finished training.')
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import models, transforms
from torchsummary import summary
class Net_SMALL_CNN(nn.Module):
    """Small CNN mirroring the Keras model: `num_conv` conv/pool stages
    followed by two fully connected layers producing raw logits."""

    def __init__(self, num_classes=10, num_conv=3):
        super(Net_SMALL_CNN, self).__init__()
        self.num_conv = num_conv
        # First conv maps RGB (3 ch) to 32 feature maps; every later stage
        # reuses self.conv2 — i.e. stages 2..num_conv share one weight set.
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.pool = nn.MaxPool2d(2, 2)
        # 128 = 32 channels * 2 * 2 spatial — valid for 32x32 inputs with
        # num_conv == 3; other depths/sizes would fail at this layer.
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, num_classes)

    def forward(self, x):
        out = self.pool(torch.relu(self.conv1(x)))
        for _stage in range(self.num_conv - 1):
            out = self.pool(torch.relu(self.conv2(out)))
        out = out.flatten(1)  # equivalent to nn.Flatten() (start_dim=1)
        out = torch.relu(self.fc1(out))
        return self.fc2(out)
def small_cnn(num_classes, num_conv=3):
    """Factory mirroring the Keras helper of the same name.

    Returns a Net_SMALL_CNN with the given class count and conv depth.
    """
    return Net_SMALL_CNN(num_classes=num_classes, num_conv=num_conv)
def train(trainloader, model, criterion, optimizer, epoch, device):
    """Run one training epoch over `trainloader`.

    `epoch` is accepted for interface symmetry with `test` but is unused.

    Returns:
        (mean per-sample loss, accuracy in percent) over the epoch.
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    for inputs, targets in trainloader:
        inputs = inputs.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        batch = targets.size(0)
        # criterion averages over the batch; re-weight by batch size so the
        # final division yields a true per-sample mean.
        running_loss += loss.item() * batch
        n_seen += batch
        n_correct += (outputs.argmax(dim=1) == targets).sum().item()
    return running_loss / n_seen, 100. * n_correct / n_seen
def test(testloader, model, criterion, epoch, device):
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item() * targets.size(0)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return test_loss/total, 100.*correct/total
dataset = 'cifar10'
num_classes = 10
num_conv = 3
epochs = 100
lr = 0.02
momentum = 0.9
batch_size = 250
print('Loading the dataset.')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
# Train the PyTorch model, reporting per-epoch train/test metrics.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = small_cnn(num_classes, num_conv)
model = model.to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss()
summary(model, (3, 32, 32))  # torchsummary: print layer/parameter overview
for epoch in range(epochs):
    epoch_start = time.time()
    train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, device)
    test_loss, test_acc = test(testloader, model, criterion, epoch, device)
    print(f'Epoch {epoch + 1} Time Elapsed {time.time() - epoch_start:.4f}s Train Loss {train_loss:.4f} ' +
          f'Test Loss {test_loss:.4f} Train Acc {train_acc:.4f} Test Acc {test_acc:.4f}')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment