Yunxuan Xiao (woshiyyya)

woshiyyya / test_wandb_ptl.py
Created April 18, 2023 00:20
[Canva] Integrating Wandb Logger with LightningTrainer
import os
import ray
from ray.air.config import ScalingConfig
from ray.train.lightning import LightningTrainer, LightningConfigBuilder
from ray.air.integrations.wandb import setup_wandb
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
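Only the gist's import preview survives above. Below is a minimal sketch, not the gist's actual code, of how these pieces typically fit together: a LightningModule that opens a per-worker W&B run via setup_wandb and is trained through LightningConfigBuilder/LightningTrainer. The module, project name, dataloader, and hyperparameters are placeholders.

# Sketch only: placeholder model and settings, assuming the imports above.
class ToyClassifier(pl.LightningModule):
    def __init__(self, lr=1e-3):
        super().__init__()
        self.lr = lr
        self.layer = torch.nn.Linear(28 * 28, 10)

    def setup(self, stage=None):
        # Open a W&B run on the Ray Train worker; by default only rank 0
        # gets a real run, other ranks receive a disabled stand-in.
        self.wandb_run = setup_wandb(project="wandb-ptl-demo")

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = F.cross_entropy(self.layer(x.view(x.size(0), -1)), y)
        self.wandb_run.log({"train_loss": loss.item()})
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.lr)

lightning_config = (
    LightningConfigBuilder()
    .module(cls=ToyClassifier, lr=1e-3)
    .trainer(max_epochs=2, accelerator="cpu")
    .fit_params(train_dataloaders=train_loader)  # train_loader: placeholder
    .build()
)
trainer = LightningTrainer(
    lightning_config=lightning_config,
    scaling_config=ScalingConfig(num_workers=2),
)
result = trainer.fit()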
test_lightning_trainer_gram.py
import ray
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from datasets import load_dataset, load_metric
import numpy as np
from ray.train.torch import TorchTrainer
ptl_tune_dataset.py
import pytorch_lightning as pl
from torchmetrics import Accuracy


class MNISTClassifier(pl.LightningModule):
    def __init__(self, config):
        super(MNISTClassifier, self).__init__()
        self.accuracy = Accuracy()
        # [!] Determine your data augmentation strategy here
        self.batch_size = config["batch_size"]
        self.aug_strategy = config["augmentation_strategy"]
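The preview stops at the constructor. As one illustration (an assumption, not code from the gist), the "augmentation_strategy" config value could be resolved into a torchvision transform pipeline along these lines:

from torchvision import transforms

# Hypothetical helper: maps the "augmentation_strategy" config value to a
# torchvision pipeline. The strategy names here are illustrative only.
def build_transform(aug_strategy):
    if aug_strategy == "none":
        ops = []
    elif aug_strategy == "basic":
        ops = [transforms.RandomRotation(10)]
    elif aug_strategy == "strong":
        ops = [transforms.RandomRotation(30),
               transforms.RandomAffine(0, translate=(0.1, 0.1))]
    else:
        raise ValueError(f"Unknown augmentation strategy: {aug_strategy}")
    # Standard MNIST normalization stats.
    ops += [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    return transforms.Compose(ops)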
test-longrun.py
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from filelock import FileLock
from torch.utils.data import DataLoader, random_split
from torchmetrics import Accuracy
from torchvision.datasets import MNIST
from torchvision import transforms
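Only the imports are shown above. A common pattern these imports suggest, sketched here rather than taken verbatim from the gist, is to guard the MNIST download with a FileLock so that concurrent workers on one node do not race on the dataset directory:

def load_mnist(data_dir="~/data", batch_size=128):
    # Serialize the download across processes sharing this machine.
    with FileLock(os.path.expanduser("~/.mnist.lock")):
        dataset = MNIST(
            os.path.expanduser(data_dir),
            train=True,
            download=True,
            transform=transforms.ToTensor(),
        )
    train_set, val_set = random_split(dataset, [55000, 5000])
    return (
        DataLoader(train_set, batch_size=batch_size, shuffle=True),
        DataLoader(val_set, batch_size=batch_size),
    )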
new_lightning_user_guide.md

PyTorch Lightning User Guides

Converting an existing training loop

Replace the relevant pl.Trainer arguments (strategy, plugins, callbacks, and device settings) with Ray Train's implementations, as sketched after the snippet below.

import pytorch_lightning as pl
+ from ray.train.lightning import (
+     get_devices,
+     prepare_trainer,
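The snippet above is cut off mid-import. The guide's point is that a handful of pl.Trainer arguments get swapped for Ray Train equivalents; the following sketch shows what the converted trainer construction typically looks like with Ray Train's Lightning utilities (the module and dataloader are placeholders, and the exact arguments may differ from the guide):

import pytorch_lightning as pl
from ray.train.lightning import (
    prepare_trainer,
    RayDDPStrategy,
    RayLightningEnvironment,
    RayTrainReportCallback,
)

def train_func():
    model = MyLightningModule()             # your existing LightningModule
    trainer = pl.Trainer(
        max_epochs=2,
        devices="auto",                     # let Ray Train assign devices
        accelerator="auto",
        strategy=RayDDPStrategy(),          # replaces DDPStrategy
        plugins=[RayLightningEnvironment()],
        callbacks=[RayTrainReportCallback()],
    )
    trainer = prepare_trainer(trainer)
    trainer.fit(model, train_dataloaders=train_loader)  # train_loader: placeholder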
woshiyyya / transformers_torch_trainer.py
Created August 8, 2023 04:42
Run Transformers Trainer with Ray TorchTrainer
import os
import evaluate
import numpy as np
from datasets import load_dataset
from ray.train import RunConfig, ScalingConfig, CheckpointConfig, Checkpoint
from ray.train.torch import TorchTrainer
from transformers import AutoTokenizer
from transformers import (
AutoModelForSequenceClassification,
DataCollatorWithPadding,
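The import preview is truncated above. Below is a minimal sketch of the pattern the title describes, running a Hugging Face Trainer inside a Ray TorchTrainer training function via Ray's ray.train.huggingface.transformers helpers; the dataset, model, and hyperparameters are placeholders rather than the gist's actual values.

import ray.train.huggingface.transformers as ray_transformers
from ray.train import ScalingConfig
from ray.train.torch import TorchTrainer
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

def train_func():
    dataset = load_dataset("glue", "mrpc")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    tokenized = dataset.map(
        lambda b: tokenizer(b["sentence1"], b["sentence2"],
                            truncation=True, padding="max_length"),
        batched=True,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        "bert-base-cased", num_labels=2
    )
    args = TrainingArguments(
        output_dir="hf_out", num_train_epochs=1, per_device_train_batch_size=16
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tokenized["train"],
        eval_dataset=tokenized["validation"],
    )
    # Hook the HF Trainer into Ray Train: report metrics/checkpoints and pick
    # up the distributed setup prepared by TorchTrainer.
    trainer.add_callback(ray_transformers.RayTrainReportCallback())
    trainer = ray_transformers.prepare_trainer(trainer)
    trainer.train()

ray_trainer = TorchTrainer(
    train_func,
    scaling_config=ScalingConfig(num_workers=2, use_gpu=True),
)
result = ray_trainer.fit()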
deepspeed_torch_trainer.py
# Minimal Example adapted from https://huggingface.co/docs/transformers/training
import deepspeed
import evaluate
import torch
from datasets import load_dataset
from deepspeed.accelerator import get_accelerator
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoModelForSequenceClassification,
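Only the imports survive in the preview. A minimal sketch of the core DeepSpeed loop those imports point to is below; the config values and model are placeholders, and it assumes a distributed environment has already been set up (for example by Ray TorchTrainer or the deepspeed launcher).

# Sketch: wrap the model with deepspeed.initialize and step the engine.
ds_config = {
    "train_micro_batch_size_per_gpu": 16,
    "optimizer": {"type": "AdamW", "params": {"lr": 2e-5}},
    "fp16": {"enabled": True},
}

model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-cased", num_labels=2
)
engine, optimizer, _, _ = deepspeed.initialize(
    model=model,
    model_parameters=model.parameters(),
    config=ds_config,
)

device = get_accelerator().device_name(engine.local_rank)
for batch in tqdm(train_dataloader):  # train_dataloader: placeholder DataLoader
    batch = {k: v.to(device) for k, v in batch.items()}
    outputs = engine(**batch)
    engine.backward(outputs.loss)
    engine.step()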
requirements-release-test.txt
about-time==4.2.1
absl-py==1.4.0
accelerate==0.19.0
adal==1.2.7
aim==3.17.5
aim-ui==3.17.5
aimrecords==0.0.7
aimrocks==0.4.0
aioboto3==11.2.0
aiobotocore==2.5.0
requirements-repro-nightly.txt
accelerate==0.19.0
adal==1.2.7
aiofiles==22.1.0
aiohttp==3.8.5
aiohttp-cors==0.7.0
aiorwlock==1.3.0
aiosignal==1.3.1
aiosqlite==0.19.0
alabaster==0.7.13
anyio==3.7.1
requirements-repro-262.txt
accelerate==0.19.0
adal==1.2.7
aiofiles==22.1.0
aiohttp==3.8.5
aiohttp-cors==0.7.0
aiorwlock==1.3.0
aiosignal==1.3.1
aiosqlite==0.19.0
alabaster==0.7.13
anyio==3.7.1