Skip to content

Instantly share code, notes, and snippets.

@philipturner
Last active June 14, 2022 11:52
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save philipturner/d6fe8c9458bc22a1b7417d54e47e5f40 to your computer and use it in GitHub Desktop.
Save philipturner/d6fe8c9458bc22a1b7417d54e47e5f40 to your computer and use it in GitHub Desktop.
import Foundation
import PythonKit
// Replace the path below with where your Python is located.
//PythonLibrary.useLibrary(at: "/Users/philipturner/miniforge3/bin/python")
// Bridge the PyTorch, torchvision, and matplotlib Python modules into Swift
// as dynamic PythonObject handles.
let torch = Python.import("torch")
let nn = torch.nn
let DataLoader = torch.utils.data.DataLoader
let torchvision = Python.import("torchvision")
let datasets = torchvision.datasets
// Destructure the commonly used torchvision transforms for brevity.
let (ToTensor, Lambda, Compose) = (
torchvision.transforms.ToTensor,
torchvision.transforms.Lambda,
torchvision.transforms.Compose
)
// NOTE(review): plt appears unused in the visible code — confirm before removing.
let plt = Python.import("matplotlib.pyplot")
// Download training data from open datasets.
let training_data = datasets.FashionMNIST(
    root: "data",
    train: true,
    download: true,
    transform: ToTensor()
)
// Download test data from open datasets.
let test_data = datasets.FashionMNIST(
    root: "data",
    train: false,
    download: true,
    transform: ToTensor()
)
let batch_size = 64
// Create data loaders.
let train_dataloader = DataLoader(training_data, batch_size: batch_size)
let test_dataloader = DataLoader(test_data, batch_size: batch_size)
// Materialize both loaders into Swift arrays once, so later loops use native
// Swift iteration instead of repeatedly driving the Python iterator protocol.
let train_batches = [PythonObject](train_dataloader)!
let test_batches = [PythonObject](test_dataloader)!
// Inspect the tensor layout of a single batch. The upstream PyTorch quickstart
// prints this once and breaks; looping over every batch would emit ~157
// identical lines, so only the first batch is examined here.
if let (X, y) = test_batches.first.map({ $0.tuple2 }) {
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
}
// Get cpu or gpu device for training.
let device = Bool(torch.cuda.is_available())! ? "cuda" : "cpu"
print("Using \(device) device")
// Define model
// PythonClass declares a Python class at runtime from Swift; subclassing
// nn.Module makes PyTorch treat instances as regular modules (parameters
// are registered, .to(device)/.train()/.eval() work, etc.).
let NeuralNetwork = PythonClass("NeuralNetwork", superclasses: [nn.Module], members: [
// __init__(self): registers the submodules on the Python `self` object.
"__init__": PythonInstanceMethod { (selfRef: PythonObject) in
// Swift spelling of Python's `super().__init__()`.
Python.`super`(selfRef.__class__, selfRef).__init__()
selfRef.flatten = nn.Flatten()
// MLP head: 28*28 input pixels -> 512 -> 512 -> 10 class logits.
selfRef.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10)
)
// A Python __init__ must return None.
return Python.None
},
// forward(self, x): flatten the image batch, then run it through the MLP.
"forward": PythonInstanceMethod { (params: [PythonObject]) in
var (selfRef, x) = (params[0], params[1])
x = selfRef.flatten(x)
let logits = selfRef.linear_relu_stack(x)
return logits
}
]).pythonObject
// Instantiate the model and move it to the selected device.
let model = NeuralNetwork().to(device)
print(model)
// Loss criterion and optimizer used by train(...) below.
let loss_fn = nn.CrossEntropyLoss()
let optimizer = torch.optim.SGD(model.parameters(), lr: 1e-3)
/// Runs one training epoch and prints the loss every 100 batches.
///
/// Note: iteration uses the pre-materialized global `train_batches` array;
/// the `dataloader` parameter is only consulted for `len(dataloader.dataset)`
/// when reporting progress.
///
/// - Parameters:
///   - dataloader: The PyTorch DataLoader wrapping the training set.
///   - model: The network to train (already moved to `device`).
///   - loss_fn: Loss criterion, e.g. `nn.CrossEntropyLoss()`.
///   - optimizer: Optimizer that is zeroed and stepped once per batch.
func train(
    _ dataloader: PythonObject,
    _ model: PythonObject,
    _ loss_fn: PythonObject,
    _ optimizer: PythonObject
) {
    let size = Python.len(dataloader.dataset)
    model.train()
    for (batch, pair) in train_batches.enumerated() {
        let (rawX, rawY) = pair.tuple2
        let (X, y) = (rawX.to(device), rawY.to(device))
        // Compute prediction error.
        let pred = model(X)
        let loss = loss_fn(pred, y)
        // Backpropagation.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        // Progress report every 100 batches, matching the Python tutorial's
        // output format. (`batch` is already an Int; no cast needed.)
        if batch % 100 == 0 {
            let current = PythonObject(batch) * Python.len(X)
            let roundedLoss = String(format: "%.7f", Double(loss.item())!)
            print("loss: \(roundedLoss) [\(Int(current)!)/\(Int(size)!)]")
        }
    }
}
/// Evaluates `model` over the cached global `test_batches` and prints the
/// accuracy and average loss in the PyTorch quickstart's output format.
///
/// - Parameters:
///   - dataloader: The PyTorch DataLoader wrapping the test set; used only
///     for `len(dataset)` and `len(dataloader)` normalization.
///   - model: The trained network (already on `device`).
///   - loss_fn: Loss criterion used to accumulate the average test loss.
func test(
    _ dataloader: PythonObject,
    _ model: PythonObject,
    _ loss_fn: PythonObject
) {
    let size = Python.len(dataloader.dataset)
    let num_batches = Python.len(dataloader)
    model.eval()
    var totalLoss = 0.0
    var numCorrect = 0.0
    // Swift equivalent of `with torch.no_grad():` — disable autograd for the
    // evaluation loop and restore the prior setting when the scope exits.
    do {
        let wasGradEnabled = torch.is_grad_enabled()
        torch._C._set_grad_enabled(false)
        defer { torch._C._set_grad_enabled(wasGradEnabled) }
        for pair in test_batches {
            let (rawX, rawY) = pair.tuple2
            let X = rawX.to(device)
            let y = rawY.to(device)
            let pred = model(X)
            totalLoss += Double(loss_fn(pred, y).item())!
            numCorrect += Double((pred.argmax(1) == y).type(torch.float).sum().item())!
        }
    }
    let test_loss = totalLoss / Double(num_batches)!
    let correct = numCorrect / Double(size)!
    let rounded_correct = String(format: "%.1f", 100 * correct)
    let rounded_loss = String(format: "%.8f", test_loss)
    print("Test Error: \n Accuracy: \(rounded_correct)%, Avg loss: \(rounded_loss) \n")
}
// Run the full train/evaluate cycle for a fixed number of epochs.
let epochs = 5
for epoch in 0..<epochs {
    print("Epoch \(epoch)\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
}
print("Done!")
// Persist the learned weights so they can be reloaded for inference.
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
// Reload the saved weights into a fresh model and run a single-sample
// inference demo; the do-block scopes the shadowed `model`.
do {
let model = NeuralNetwork()
model.load_state_dict(torch.load("model.pth"))
// FashionMNIST class names, indexed by label value 0...9.
let classes = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
model.eval()
// test_data[0] is an (image, label) pair from the Python dataset.
let (x, y) = (test_data[0][0], test_data[0][1])
// Disable autograd for inference; restore the prior setting on scope exit.
let prev = torch.is_grad_enabled()
torch._C._set_grad_enabled(false)
defer { torch._C._set_grad_enabled(prev) }
let pred = model(x)
// argmax over the logits selects the predicted class index.
let (predicted, actual) = (classes[Int(pred[0].argmax(0))!], classes[Int(y)!])
print("Predicted: \"\(predicted)\", Actual: \"\(actual)\"")
}
@philipturner
Copy link
Author

philipturner commented Jan 21, 2022

This is a Swift translation of the PyTorch quickstart tutorial, enabled by PythonKit's new subclassing feature: the model is declared with `PythonClass` and subclasses `torch.nn.Module`.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment