Skip to content

Instantly share code, notes, and snippets.

Avatar
💭
Reject negativity. Stay positive. Use ReLU.

Ayan Das dasayan05

💭
Reject negativity. Stay positive. Use ReLU.
View GitHub Profile
@dasayan05
dasayan05 / mog.py
Last active May 6, 2020
Example usage of Pyro for MoG
View mog.py
import pyro, torch, numpy as np
import pyro.distributions as dist
import pyro.optim as optim
import pyro.infer as infer
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from scipy.stats import norm
plt.ioff()
def getdata(N, mean1=2.0, mean2=-1.0, std1=0.5, std2=0.5):
@dasayan05
dasayan05 / sync_gradients.py
Created Feb 27, 2019
Synchronize gradients
View sync_gradients.py
def sync_gradients(model, rank, world_size):
    """Sum each parameter's gradient across all ranks in the process group.

    After this call every worker holds the same (summed) gradient, so all
    workers apply an identical optimizer step.  `rank` and `world_size` are
    kept for interface compatibility with the sibling sync helpers; the
    collective itself does not need them.
    """
    for param in model.parameters():
        # In-place collective: param.grad is overwritten with the global sum.
        # Fix: `dist.reduce_op` is deprecated (removed in modern PyTorch);
        # `dist.ReduceOp.SUM` is the supported spelling.
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
@dasayan05
dasayan05 / sync_init_weights.py
Created Feb 27, 2019
Synchronize initial weights
View sync_init_weights.py
def sync_initial_weights(model, rank, world_size):
    """Copy rank 0's initial parameters to every other rank, point-to-point.

    Rank 0 sends each parameter tensor to ranks 1..world_size-1 in turn;
    every other rank blocks on a matching receive and overwrites its own
    copy in place, so all workers start training from identical weights.
    """
    for param in model.parameters():
        if rank != 0:
            # Non-zero ranks wait here until rank 0's tensor arrives.
            dist.recv(param.data, src=0)
        else:
            # Rank 0 pushes its own copy to each sibling, one at a time.
            for dst_rank in range(1, world_size):
                dist.send(param.data, dst=dst_rank)
@dasayan05
dasayan05 / train.py
Created Feb 27, 2019
Distributed training of DL model
View train.py
# Distributed training driver (fragment — the inner loop body continues
# past this view; indentation was lost in extraction).
# Assumes rank/world_size/train_loader/epochs are defined earlier in the
# full script — TODO confirm against the original gist.
model = LeNet()
# first synchronization of initial weights
sync_initial_weights(model, rank, world_size)
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.85)
model.train()
for epoch in range(1, epochs + 1):
for data, target in train_loader:
optimizer.zero_grad()
@dasayan05
dasayan05 / allreduce.py
Created Feb 27, 2019
Basic usage of All-reduce
View allreduce.py
def main(rank, world):
    """Demonstrate all_reduce with a 3-rank world.

    Ranks 0, 1 and 2 each contribute one scalar tensor; after the
    collective every rank's ``x`` holds the global sum (1 + 2 - 3 = 0).

    Raises:
        ValueError: if called with a rank outside 0..2 — the original
            code left ``x`` unbound in that case and crashed with a
            confusing NameError at the all_reduce call.
    """
    if rank == 0:
        x = torch.tensor([1.])
    elif rank == 1:
        x = torch.tensor([2.])
    elif rank == 2:
        x = torch.tensor([-3.])
    else:
        # Fail fast with an explicit message instead of a NameError.
        raise ValueError('this demo expects exactly 3 ranks, got rank {}'.format(rank))
    # In-place collective: every rank's x is overwritten with the sum.
    # (dist.ReduceOp.SUM replaces the deprecated dist.reduce_op.SUM.)
    dist.all_reduce(x, op=dist.ReduceOp.SUM)
    print('Rank {} has {}'.format(rank, x))
@dasayan05
dasayan05 / ptdist.py
Last active Dec 4, 2019
Peer-to-peer communication
View ptdist.py
# filename 'ptdist.py'
import torch
import torch.distributed as dist
def main(rank, world):
if rank == 0:
x = torch.tensor([1., -1.]) # Tensor of interest
dist.send(x, dst=1)
print('Rank-0 has sent the following tensor to Rank-1')
print(x)
else: