@simeneide
Created January 28, 2020 11:46
#%% IMPORTS
import torch
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from pytorch_lightning import Trainer
from torch.nn import functional as F
import pyro
import pyro.distributions as dist
# %%
class CoolSystem(pl.LightningModule):
    def __init__(self):
        super(CoolSystem, self).__init__()
        # not the best model...
        self.l1 = torch.nn.Linear(1, 1)

    def forward(self, x):
        return self.l1(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        yhat = self.forward(x)
        loss = (yhat - y).abs().mean()
        tensorboard_logs = {'train_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        yhat = self.forward(x)
        loss = (yhat - y).abs().mean()
        return {'val_loss': loss}

    def validation_end(self, outputs):
        # OPTIONAL
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'loss': avg_loss}
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def configure_optimizers(self):
        # REQUIRED
        # can return multiple optimizers and learning-rate schedulers
        # (LBFGS is automatically supported, no need for a closure function)
        return torch.optim.Adam(self.parameters(), lr=0.02)

    @pl.data_loader
    def train_dataloader(self):
        # y = 2 + x plus a little Gaussian noise
        x = torch.arange(100).float().view(-1, 1)
        y = 2 + x + torch.distributions.Normal(0, 1).sample((len(x),)).view(-1, 1) * 0.2
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(dataset=ds, batch_size=2)
        return dataloader

    @pl.data_loader
    def val_dataloader(self):
        x = torch.arange(10).float().view(-1, 1)
        y = 2 + x + torch.distributions.Normal(0, 1).sample((len(x),)).view(-1, 1) * 0.2
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(dataset=ds, batch_size=2)
        return dataloader
# %%
system = CoolSystem()
# most basic trainer, uses good defaults
trainer = Trainer(min_epochs=1)
trainer.fit(system)
# RESULTS
list(system.parameters())
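# matplotlib is imported above but never used; a quick sanity check of the fit
# could look like this (untested sketch, not part of the original gist):
x_plot = torch.arange(100).float().view(-1, 1)
with torch.no_grad():
    y_plot = system(x_plot)
plt.plot(x_plot.numpy(), (2 + x_plot).numpy(), label="true line y = 2 + x")
plt.plot(x_plot.numpy(), y_plot.numpy(), label="fitted line")
plt.legend()
plt.show()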
# %% PYRO LIGHTNING!!
#%%
import torch
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from pytorch_lightning import Trainer
from torch.nn import functional as F
import pyro
import pyro.distributions as dist
class PyroOptWrap(pyro.infer.SVI):
    # Thin wrapper so Lightning can hold the SVI object where it expects an
    # optimizer: SVI has no state_dict, so stub one out for checkpointing.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def state_dict(self):
        return {}


class PyroCoolSystem(pl.LightningModule):
    def __init__(self, num_data=100, lr=1e-3):
        super(PyroCoolSystem, self).__init__()
        self.lr = lr
        self.num_data = num_data

    def model(self, batch):
        x, y = batch
        yhat = self.forward(x)
        obsdistr = dist.Normal(yhat, 0.2)  # .to_event(1)
        pyro.sample("obs", obsdistr, obs=y)
        return yhat

    def guide(self, batch):
        b_m = pyro.param("b-mean", torch.tensor(0.1))
        a_m = pyro.param("a-mean", torch.tensor(0.1))
        b = pyro.sample("beta", dist.Normal(b_m, 0.1))
        a = pyro.sample("alpha", dist.Normal(a_m, 0.1))

    def forward(self, x):
        b = pyro.sample("beta", dist.Normal(0, 1))
        a = pyro.sample("alpha", dist.Normal(0, 1))
        yhat = a + x * b
        return yhat

    def training_step(self, batch, batch_idx):
        # x, y = batch
        # yhat = self.forward(x)
        # svi.step() already computes gradients and updates Pyro's params;
        # wrap the returned float in a tensor so Lightning's bookkeeping is happy.
        loss = self.svi.step(batch)
        loss = torch.tensor(loss).requires_grad_(True)
        tensorboard_logs = {'running/loss': loss,
                            'param/a-mean': pyro.param("a-mean"),
                            'param/b-mean': pyro.param("b-mean")}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_idx):
        loss = self.svi.evaluate_loss(batch)
        loss = torch.tensor(loss).requires_grad_(True)
        return {'val_loss': loss}

    def validation_end(self, outputs):
        # OPTIONAL
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_loss': avg_loss}
        # print(pyro.param("a-mean"), pyro.param('b-mean'))
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def configure_optimizers(self):
        # REQUIRED
        # can return multiple optimizers and learning-rate schedulers
        # (LBFGS is automatically supported, no need for a closure function)
        self.svi = PyroOptWrap(model=self.model,
                               guide=self.guide,
                               optim=pyro.optim.SGD({"lr": self.lr, "momentum": 0.0}),
                               loss=pyro.infer.Trace_ELBO())
        return [self.svi]

    @pl.data_loader
    def train_dataloader(self):
        x = torch.rand((self.num_data,)).float().view(-1, 1)
        y = 2 + x + torch.distributions.Normal(0, 1).sample((len(x),)).view(-1, 1) * 0.2
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(dataset=ds, batch_size=2)
        return dataloader

    @pl.data_loader
    def val_dataloader(self):
        x = torch.rand((100,)).float().view(-1, 1)
        y = 2 + x + torch.distributions.Normal(0, 1).sample((len(x),)).view(-1, 1) * 0.2
        ds = torch.utils.data.TensorDataset(x, y)
        dataloader = torch.utils.data.DataLoader(dataset=ds, batch_size=10)
        return dataloader

    def optimizer_step(self, *args, **kwargs):
        # Pyro updated the params inside svi.step(), so Lightning's optimizer
        # step and backward pass are made no-ops.
        pass

    def backward(self, *args, **kwargs):
        pass
# %%
pyro.clear_param_store()
system = PyroCoolSystem(num_data=2)
# most basic trainer, uses good defaults
trainer = Trainer(min_epochs=1, max_epochs=100)
trainer.fit(system)
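# For parity with the first example, one might inspect the learned variational
# parameters after fitting (untested sketch, not part of the original gist;
# "a-mean" and "b-mean" are the pyro.params defined in guide()):
print("posterior mean of alpha:", pyro.param("a-mean").item())
print("posterior mean of beta:", pyro.param("b-mean").item())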
# %%
@gianmarcoaversanoenx commented Dec 1, 2022

Copy-pasted this into a notebook with pytorch-lightning and pyro installed, and got the following error:

---------------------------------------------------------------------------
MisconfigurationException                 Traceback (most recent call last)
Cell In[6], line 5
      3 # most basic trainer, uses good defaults
      4 trainer = Trainer(min_epochs=1, max_epochs=100)
----> 5 trainer.fit(system)

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:582, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    580     raise TypeError(f"`Trainer.fit()` requires a `LightningModule`, got: {model.__class__.__qualname__}")
    581 self.strategy._lightning_module = model
--> 582 call._call_and_handle_interrupt(
    583     self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
    584 )

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/trainer/call.py:38, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
     36         return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
     37     else:
---> 38         return trainer_fn(*args, **kwargs)
     40 except _TunerExitException:
     41     trainer._call_teardown_hook()

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:624, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    617 ckpt_path = ckpt_path or self.resume_from_checkpoint
    618 self._ckpt_path = self._checkpoint_connector._set_ckpt_path(
    619     self.state.fn,
    620     ckpt_path,  # type: ignore[arg-type]
    621     model_provided=True,
    622     model_connected=self.lightning_module is not None,
    623 )
--> 624 self._run(model, ckpt_path=self.ckpt_path)
    626 assert self.state.stopped
    627 self.training = False

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1042, in Trainer._run(self, model, ckpt_path)
   1039 self._logger_connector.reset_metrics()
   1041 # strategy will configure model and move it to the device
-> 1042 self.strategy.setup(self)
   1044 # hook
   1045 if self.state.fn == TrainerFn.FITTING:

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/strategies/single_device.py:74, in SingleDeviceStrategy.setup(self, trainer)
     72 def setup(self, trainer: pl.Trainer) -> None:
     73     self.model_to_device()
---> 74     super().setup(trainer)

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/strategies/strategy.py:154, in Strategy.setup(self, trainer)
    152 assert self.accelerator is not None
    153 self.accelerator.setup(trainer)
--> 154 self.setup_optimizers(trainer)
    155 self.setup_precision_plugin()
    156 _optimizers_to_device(self.optimizers, self.root_device)

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/strategies/strategy.py:142, in Strategy.setup_optimizers(self, trainer)
    140     return
    141 assert self.lightning_module is not None
--> 142 self.optimizers, self.lr_scheduler_configs, self.optimizer_frequencies = _init_optimizers_and_lr_schedulers(
    143     self.lightning_module
    144 )

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py:188, in _init_optimizers_and_lr_schedulers(model)
    183     rank_zero_warn(
    184         "`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
    185     )
    186     optim_conf = _MockOptimizer()
--> 188 optimizers, lr_schedulers, optimizer_frequencies, monitor = _configure_optimizers(optim_conf)
    189 lr_scheduler_configs = (
    190     _configure_schedulers_automatic_opt(lr_schedulers, monitor)
    191     if model.automatic_optimization
    192     else _configure_schedulers_manual_opt(lr_schedulers)
    193 )
    194 _set_scheduler_opt_idx(optimizers, lr_scheduler_configs)

File ~/.pyenv/versions/3.8.13/envs/brainiac-2/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py:251, in _configure_optimizers(optim_conf)
    248     optimizers = list(optim_conf)
    249 # unknown configuration
    250 else:
--> 251     raise MisconfigurationException(
    252         "Unknown configuration for model optimizers."
    253         " Output from `model.configure_optimizers()` should be one of:\n"
    254         " * `Optimizer`\n"
    255         " * [`Optimizer`]\n"
    256         " * ([`Optimizer`], [`_LRScheduler`])\n"
    257         ' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `_LRScheduler`}\n'
    258         ' * A list of the previously described dict format, with an optional "frequency" key (int)'
    259     )
    260 return optimizers, lr_schedulers, optimizer_frequencies, monitor

MisconfigurationException: Unknown configuration for model optimizers. Output from `model.configure_optimizers()` should be one of:
 * `Optimizer`
 * [`Optimizer`]
 * ([`Optimizer`], [`_LRScheduler`])
 * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `_LRScheduler`}
 * A list of the previously described dict format, with an optional "frequency" key (int)

@simeneide (Author)

Ok, I don't know about that. Both Pyro and PyTorch Lightning have probably had massive updates since I created this gist, unfortunately.
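For anyone hitting this on a recent pytorch-lightning release: the exception is Lightning rejecting the SVI wrapper returned from configure_optimizers. One possible adaptation (an untested sketch, not the original author's code; the class name PyroLinearRegression and the helper make_loader are made up for illustration) is to switch to manual optimization, let svi.step() do the update inside training_step, and return a throwaway torch optimizer:

import torch
import pytorch_lightning as pl
import pyro
import pyro.distributions as dist

class PyroLinearRegression(pl.LightningModule):
    def __init__(self, lr=1e-3):
        super().__init__()
        self.automatic_optimization = False  # we drive Pyro's SVI ourselves
        self.svi = pyro.infer.SVI(model=self.model,
                                  guide=self.guide,
                                  optim=pyro.optim.SGD({"lr": lr, "momentum": 0.0}),
                                  loss=pyro.infer.Trace_ELBO())

    def model(self, batch):
        x, y = batch
        b = pyro.sample("beta", dist.Normal(0., 1.))
        a = pyro.sample("alpha", dist.Normal(0., 1.))
        pyro.sample("obs", dist.Normal(a + x * b, 0.2), obs=y)

    def guide(self, batch):
        b_m = pyro.param("b-mean", torch.tensor(0.1))
        a_m = pyro.param("a-mean", torch.tensor(0.1))
        pyro.sample("beta", dist.Normal(b_m, 0.1))
        pyro.sample("alpha", dist.Normal(a_m, 0.1))

    def training_step(self, batch, batch_idx):
        loss = self.svi.step(batch)  # Pyro performs the gradient update here
        self.log("train_loss", float(loss))
        # nothing returned: with manual optimization Lightning has no backward to run

    def configure_optimizers(self):
        # Pyro owns its own parameters, so hand Lightning a dummy optimizer over
        # a throwaway parameter (never stepped) to satisfy the expected return types.
        return torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)

def make_loader(n=100):
    x = torch.rand(n, 1)
    y = 2 + x + 0.2 * torch.randn(n, 1)
    return torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x, y), batch_size=10)

pyro.clear_param_store()
trainer = pl.Trainer(max_epochs=10, logger=False, enable_checkpointing=False)
trainer.fit(PyroLinearRegression(), train_dataloaders=make_loader())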
