import torch
import pytorch_lightning as pl

SIZE = 64 * 64 * 3


class RandomDataset(torch.utils.data.Dataset):
    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len


class BoringModel(pl.LightningModule):
    def __init__(self):
        """
        Testing PL Module

        Use as follows:
        - subclass
        - modify the behavior for what you want

        class TestModel(BoringModel):
            def training_step(...):
                # do your own thing

        or:

        model = BoringModel()
        model.training_epoch_end = None

        (a concrete subclassing sketch follows the class definition below)
        """
        super().__init__()
        self.layer = torch.nn.Linear(SIZE, 2)

    def forward(self, x):
        return self.layer(x)

    def loss(self, batch, prediction):
        # An arbitrary loss to have a loss that updates the model
        # weights during `Trainer.fit` calls
        return torch.nn.functional.mse_loss(prediction,
                                            torch.ones_like(prediction))

    def step(self, x):
        x = self(x)
        out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
        return out

    def training_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"loss": loss}

    def training_step_end(self, training_step_outputs):
        return training_step_outputs

    def training_epoch_end(self, outputs) -> None:
        torch.stack([x["loss"] for x in outputs]).mean()

    def validation_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"x": loss}

    def validation_epoch_end(self, outputs) -> None:
        torch.stack([x["x"] for x in outputs]).mean()

    def test_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        return {"y": loss}

    def test_epoch_end(self, outputs) -> None:
        torch.stack([x["y"] for x in outputs]).mean()

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return [optimizer], [lr_scheduler]

    def train_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(SIZE, 64))

    def val_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(SIZE, 64))

    def test_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(SIZE, 64))
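
# A minimal sketch (not part of the original gist) of the subclass-and-override
# usage described in the BoringModel docstring: keep the boilerplate, override
# only the behavior you care about. The class name `LoggingModel` and the
# "train_loss" key are hypothetical; `self.log` is the standard LightningModule
# logging hook.
class LoggingModel(BoringModel):
    def training_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        self.log("train_loss", loss)  # report the step loss to the attached logger
        return {"loss": loss}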

if __name__ == '__main__':
    ds = RandomDataset(SIZE, 10000)
    dl = torch.utils.data.DataLoader(ds, num_workers=16, batch_size=128)
    # `gpus=[...]` and `accelerator="ddp"` follow the PyTorch Lightning 1.x API;
    # newer releases expect `devices=[...]` and `strategy="ddp"` instead.
    pl.Trainer(gpus=[1, 2], max_epochs=20,
               accelerator="ddp").fit(BoringModel(), dl)