giacaglia / test_function.py
Created July 26, 2021 16:29
Test function for move_zeros_to_left
def test_function(test_case):
    # test_case is an (input, expected) pair; move_zeros_to_left mutates the input in place.
    move_zeros_to_left(test_case[0])
    if test_case[0] == test_case[1]:
        print("Pass")
    else:
        print("Fail")
giacaglia / move_zeros_to_left.py
Created July 26, 2021 16:27
Move zeros to the left of an array
def move_zeros_to_left(array):
    # Walk the array from the right, copying non-zero values toward the back,
    # then fill the remaining front positions with zeros.
    i = len(array) - 1
    j = len(array) - 1
    while i >= 0:
        if array[i] != 0:
            array[j] = array[i]
            j -= 1
        i -= 1
    while j >= 0:
        array[j] = 0
        j -= 1  # without this decrement the loop never terminates
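Calling it directly on a made-up list shows the behaviour: zeros move to the front, the relative order of the non-zero elements is preserved, and the work is done in place in O(n) time with O(1) extra space.

values = [1, 0, 2, 0, 0, 3, 4]
move_zeros_to_left(values)
print(values)   # [0, 0, 0, 1, 2, 3, 4]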
# Save checkpoint
checkpoint = {
    'model': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'amp': amp.state_dict()
}
torch.save(checkpoint, 'amp_checkpoint.pt')
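To restore this checkpoint later, the usual Apex pattern is to rebuild the model and optimizer, call amp.initialize again, and then load the three state dicts. A minimal sketch, assuming the same ConvNet/SGD setup and that opt_level matches the one used for training:

model = ConvNet().cuda(gpu)
optimizer = torch.optim.SGD(model.parameters(), 1e-4)
model, optimizer = amp.initialize(model, optimizer, opt_level='O2')  # opt_level is an assumption

# Load model, optimizer, and amp loss-scaler state from the saved file.
checkpoint = torch.load('amp_checkpoint.pt')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
amp.load_state_dict(checkpoint['amp'])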
...
rank = args.nr * args.gpus + gpu
dist.init_process_group(
    backend='nccl',
    init_method='env://',
    world_size=args.world_size,
    rank=rank)
torch.manual_seed(0)
model = ConvNet()
torch.cuda.set_device(gpu)
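After the process group is initialized and the process is pinned to its GPU, the model is typically wrapped in DistributedDataParallel so that gradients are synchronized across all processes. A minimal sketch continuing the fragment above (the batch size and learning rate mirror the later snippets):

model.cuda(gpu)
batch_size = 100
criterion = nn.CrossEntropyLoss().cuda(gpu)
optimizer = torch.optim.SGD(model.parameters(), 1e-4)
# Each process keeps a replica on its own GPU; gradients are all-reduced during backward().
model = nn.parallel.DistributedDataParallel(model, device_ids=[gpu])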
# On each of the other nodes, pass that node's index i (1, 2, or 3 with 4 nodes):
python src/mnist-distributed.py -n 4 -g 8 -nr i
# On the node hosting rank 0:
python src/mnist-distributed.py -n 4 -g 8 -nr 0
def train(gpu, args):
    ############################################################
    rank = args.nr * args.gpus + gpu
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=args.world_size,
        rank=rank
    )
    ############################################################
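Each process also needs to see its own shard of the data, which is what DistributedSampler is for. A hedged sketch, assuming an MNIST-style train_dataset and the batch_size from the other snippets:

train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset,
    num_replicas=args.world_size,
    rank=rank)
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=False,          # the sampler takes care of shuffling
    sampler=train_sampler)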
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--nodes', default=1,
                        type=int, metavar='N')
    parser.add_argument('-g', '--gpus', default=1, type=int,
                        help='number of gpus per node')
    parser.add_argument('-nr', '--nr', default=0, type=int,
                        help='ranking within the nodes')
    parser.add_argument('--epochs', default=2, type=int,
                        metavar='N',
                        help='number of total epochs to run')
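    # The rest of main() is not shown above; a hedged sketch of the usual continuation
    # (assumes `import os` and `import torch.multiprocessing as mp`):
    args = parser.parse_args()
    args.world_size = args.gpus * args.nodes             # total number of processes
    os.environ['MASTER_ADDR'] = '<ip-of-node-0>'         # placeholder: address of the rank-0 node
    os.environ['MASTER_PORT'] = '8888'                   # placeholder: any free port
    mp.spawn(train, nprocs=args.gpus, args=(args,))      # one process per GPU on this node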
def train(gpu, args):
    torch.manual_seed(0)
    model = ConvNet()
    model = nn.DataParallel(model)
    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    batch_size = 100
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    optimizer = torch.optim.SGD(model.parameters(), 1e-4)
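    # The rest of train() is not shown here; a hedged sketch of the standard loop,
    # assuming a `train_loader` over MNIST has been built with this batch_size:
    for epoch in range(args.epochs):
        for images, labels in train_loader:
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)
            outputs = model(images)               # forward pass
            loss = criterion(outputs, labels)     # cross-entropy loss
            optimizer.zero_grad()
            loss.backward()                       # backward pass
            optimizer.step()                      # SGD update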
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),