Train Dashboard BugBash
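A PyTorch MNIST training script run with Ray Train's TorchTrainer, plus a checklist of run details to find in the Ray Train dashboard.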
from __future__ import print_function
import argparse

import torch
import torch.distributed  # needed for the explicit barrier() calls below
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Subset
from torchvision import datasets, transforms

import ray.train
from ray.train import ScalingConfig, RunConfig
from ray.train.torch import TorchTrainer
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # 246016 = 64 channels * 62 * 62: the spatial size after two 3x3
        # convs and one 2x2 max-pool on the 128x128 resized inputs.
        self.fc1 = nn.Linear(246016, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def train(args, model, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to("cuda"), target.to("cuda")
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()
                )
            )
            if args.dry_run:
                break
def test(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to("cuda"), target.to("cuda")
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def train_func(config):
    args = config["args"]
    torch.manual_seed(args.seed)  # use the --seed flag instead of a hardcoded value
    use_cuda = True
    device = "cuda" if use_cuda else "cpu"
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 0,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Only the first worker on each node downloads MNIST; the other workers
    # wait at the barrier and then read the already-downloaded copy.
    if ray.train.get_context().get_local_rank() == 0:
        dataset1 = datasets.MNIST('data', train=True, download=True)
        torch.distributed.barrier()
    else:
        torch.distributed.barrier()
        dataset1 = datasets.MNIST('data', train=True, download=False)
    dataset2 = datasets.MNIST('data', train=False)

    # Train on a random 30% subset to shorten each epoch.
    subset_size = int(0.3 * len(dataset1))
    indices = torch.randperm(len(dataset1))[:subset_size]
    dataset1 = Subset(dataset1, indices)

    def collate(batch):
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
            transforms.Resize(128),  # upsample 28x28 MNIST images to 128x128
        ])
        data = torch.stack([transform(img) for img, _ in batch])
        target = torch.tensor([label for _, label in batch])
        return data, target

    train_loader = torch.utils.data.DataLoader(dataset1, collate_fn=collate, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, collate_fn=collate, **test_kwargs)

    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    model = ray.train.torch.prepare_model(model)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):  # run exactly args.epochs epochs
        train(args, model, train_loader, optimizer, epoch)
        test(model, test_loader)
        scheduler.step()
        if args.save_model:
            # Save from a single worker to avoid concurrent writes.
            if ray.train.get_context().get_world_rank() == 0:
                torch.save(model.state_dict(), f"mnist_cnn_{epoch}.pt")
            torch.distributed.barrier()
    print("Training Finished!")
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=1024, metavar='N',
                        help='input batch size for training (default: 1024)')
    parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                        help='input batch size for testing (default: 256)')
    parser.add_argument('--epochs', type=int, default=2, metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 1e-2)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='learning rate step gamma (default: 0.7)')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='save the current model after each epoch')
    args = parser.parse_args()

    trainer = TorchTrainer(
        train_func,
        train_loop_config={"args": args},
        scaling_config=ScalingConfig(
            num_workers=5,
            use_gpu=True
        ),
        run_config=RunConfig(
            name="test_run",
            storage_path="/mnt/cluster_storage/train_run"
        )
    )
    trainer.fit()


if __name__ == '__main__':
    main()
from __future__ import print_function
import argparse

import torch
import torch.distributed  # needed for the explicit barrier() calls below
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Subset
from torchvision import datasets, transforms

import ray.train
from ray.train import ScalingConfig, RunConfig
from ray.train.torch import TorchTrainer
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # 246016 = 64 channels * 62 * 62: the spatial size after two 3x3
        # convs and one 2x2 max-pool on the 128x128 resized inputs.
        self.fc1 = nn.Linear(246016, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def train(args, model, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to("cuda"), target.to("cuda")
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()
                )
            )
            if args.dry_run:
                break
def test(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to("cuda"), target.to("cuda")
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def train_func(config):
    args = config["args"]
    torch.manual_seed(args.seed)  # use the --seed flag instead of a hardcoded value
    use_cuda = True
    device = "cuda" if use_cuda else "cpu"
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 0,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Only the first worker on each node downloads MNIST; the other workers
    # wait at the barrier and then read the already-downloaded copy.
    if ray.train.get_context().get_local_rank() == 0:
        dataset1 = datasets.MNIST('data', train=True, download=True)
        torch.distributed.barrier()
    else:
        torch.distributed.barrier()
        dataset1 = datasets.MNIST('data', train=True, download=False)
    dataset2 = datasets.MNIST('data', train=False)

    # Train on a random 30% subset to shorten each epoch.
    subset_size = int(0.3 * len(dataset1))
    indices = torch.randperm(len(dataset1))[:subset_size]
    dataset1 = Subset(dataset1, indices)

    def collate(batch):
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
            transforms.Resize(128),  # upsample 28x28 MNIST images to 128x128
        ])
        data = torch.stack([transform(img) for img, _ in batch])
        target = torch.tensor([label for _, label in batch])
        return data, target

    train_loader = torch.utils.data.DataLoader(dataset1, collate_fn=collate, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, collate_fn=collate, **test_kwargs)

    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    model = ray.train.torch.prepare_model(model)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):  # run exactly args.epochs epochs
        train(args, model, train_loader, optimizer, epoch)
        test(model, test_loader)
        scheduler.step()
        if args.save_model:
            # Save from a single worker to avoid concurrent writes.
            if ray.train.get_context().get_world_rank() == 0:
                torch.save(model.state_dict(), f"mnist_cnn_{epoch}.pt")
    print("Training Finished!")
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 256)')
    parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                        help='input batch size for testing (default: 256)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 1e-2)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='learning rate step gamma (default: 0.7)')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='save the current model after each epoch')
    args = parser.parse_args()

    trainer = TorchTrainer(
        train_func,
        train_loop_config={"args": args},
        scaling_config=ScalingConfig(
            num_workers=4,
            use_gpu=True
        ),
        run_config=RunConfig(
            name="test_run",
            storage_path="/mnt/cluster_storage/train_run"
        )
    )
    trainer.fit()


if __name__ == '__main__':
    main()
# Task: Find the following information in the Ray Train dashboard.
# | Item                                   | Answer |
# | -------------------------------------- | ------ |
# | Number of GPUs used in this run        |        |
# | Number of Workers                      |        |
# | GPU Utilization                        |        |
# | GPU Memory Usage                       |        |
# | Training Run Start Time                |        |
# | Training Run End Time                  |        |
# | GPU ID of the World Rank 0 Worker      |        |
# | PID of the World Rank 1 Worker         |        |
# | Node IP of the World Rank 2 Worker     |        |
# | Logs of the World Rank 3 Worker        |        |
# | CPU Flamegraph of local_rank 0 worker  |        |
# | StackTrace of local_rank 1 worker      |        |
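
# A minimal sketch of how the per-worker fields above could also be printed
# from inside train_func, as a cross-check against the dashboard. This helper
# is an illustrative assumption and not part of the original bug bash; it
# only uses documented Ray APIs (ray.train.get_context(), ray.get_gpu_ids(),
# ray.util.get_node_ip_address()).

def report_worker_info():
    # Hypothetical helper: call at the top of train_func on every worker.
    import os
    import ray

    ctx = ray.train.get_context()
    print(f"world_rank={ctx.get_world_rank()}, "
          f"local_rank={ctx.get_local_rank()}, "
          f"node_ip={ray.util.get_node_ip_address()}, "
          f"pid={os.getpid()}, "
          f"gpu_ids={ray.get_gpu_ids()}")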