@quentinf00
quentinf00 / pyproject.toml
Last active June 11, 2024 07:19
Programmatic commit
[build-system]
requires = ["setuptools >= 61.0"]
build-backend = "setuptools.build_meta"

[project]
dynamic = ["version"]
name = "lit_commit_cb"
authors = [
  {name = "Quentin Febvre", email = "quentin.febvre@gmail.com"},
]
dependencies = [
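The preview above only shows the packaging metadata for lit_commit_cb; the callback code itself is not visible here. Purely as an illustration of what the gist title suggests (committing the working tree when a training run starts, so every run maps to a commit), a minimal sketch assuming PyTorch Lightning and GitPython; the class name, hook and commit message are hypothetical.

import git
from pytorch_lightning.callbacks import Callback

class CommitCallback(Callback):
    """Hypothetical sketch: snapshot the working tree before a run."""

    def on_fit_start(self, trainer, pl_module):
        repo = git.Repo(search_parent_directories=True)
        repo.git.add(all=True)  # stage everything in the working tree
        sha = repo.index.commit("auto-commit before training run").hexsha
        print(f"Run pinned to commit {sha}")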
import tqdm
import torch
import numpy as np
import pandas as pd

def oi(
    outgrid_da,
    patcher_cls,
    obs,
    obs_var='ssh',
def log_mem_amp_cp(model, inp, mem_log=None, exp=None, cp_chunks=3):
    mem_log = mem_log or []
    exp = exp or f'exp_{len(mem_log)}'
    hr = []
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    amp_model, optimizer = amp.initialize(model, optimizer)
    for idx, module in enumerate(amp_model.modules()):
        add_memory_hooks(idx, module, mem_log, exp, hr)
    try:
        # preview truncated here; the rest presumably mirrors log_mem_cp below,
        # with the checkpointed forward and the apex-scaled backward
        out = checkpoint_sequential(amp_model, cp_chunks, inp)
        loss = out.sum()
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    finally:
        [h.remove() for h in hr]  # assumed cleanup of the registered hooks
    return mem_log
@quentinf00
quentinf00 / resnet_seq.py
Created September 20, 2019 16:52
Sequential version of the resnet to use checkpoint_sequential
# %% Create Sequential version of model
class Flatten(nn.Module):
    def forward(self, x):
        return torch.flatten(x, 1)
seq_model = nn.Sequential(
    model.conv1,
    model.bn1,
    model.relu,
    model.maxpool,
    # remaining torchvision ResNet modules (not shown in the truncated preview)
    model.layer1,
    model.layer2,
    model.layer3,
    model.layer4,
    model.avgpool,
    Flatten(),
    model.fc,
)
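Not part of the gist preview: a short usage sketch of how such a sequential model can be run through torch.utils.checkpoint.checkpoint_sequential to trade recomputation for activation memory. The segment count and batch size are illustrative, and seq_model is assumed from the snippet above.

from torch.utils.checkpoint import checkpoint_sequential

inp = torch.rand(32, 3, 224, 224).cuda()
inp.requires_grad_()  # checkpointed segments need a grad-requiring input
out = checkpoint_sequential(seq_model, 3, inp)  # split the Sequential into 3 segments
out.sum().backward()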
@quentinf00
quentinf00 / log_resnet18_mem.py
Created September 20, 2019 15:33
Log resnet 18 memory consumption during one training iteration
# %% Analysis baseline
model = resnet18().cuda()
bs = 128
input = torch.rand(bs, 3, 224, 224).cuda()
mem_log = []
try:
    mem_log.extend(log_mem(model, input, exp='baseline'))
except Exception as e:
    # assumed guard; the preview is truncated after the call to log_mem
    print(f'log_mem failed because of {e}')
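The hooks fill mem_log with one dict per call, so the log can be inspected with pandas once the experiments have run. A hedged sketch: the 'exp' and 'call_idx' keys appear in the hook gist further down; the name of the memory column is an assumption.

import pandas as pd

df = pd.DataFrame(mem_log)
print(df.head())
# e.g. peak memory per experiment, assuming the hooks store allocated bytes
# under a column such as 'mem_all':
# print(df.groupby('exp')['mem_all'].max())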
@quentinf00
quentinf00 / log_mem_amp.py
Created September 20, 2019 15:05
Log memory with automatic mixed precision
def log_mem_amp(model, inp, mem_log=None, exp=None):
    mem_log = mem_log or []
    exp = exp or f'exp_{len(mem_log)}'
    hr = []
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    amp_model, optimizer = amp.initialize(model, optimizer)
    for idx, module in enumerate(amp_model.modules()):
        add_memory_hooks(idx, module, mem_log, exp, hr)
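The preview stops before the forward and backward pass. For context, a self-contained sketch of the apex mixed-precision step that the truncated body presumably wraps with its memory hooks; the model, batch size and dummy loss are illustrative.

import torch
from apex import amp
from torchvision.models import resnet18

model = resnet18().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model, optimizer = amp.initialize(model, optimizer)

inp = torch.rand(32, 3, 224, 224).cuda()
loss = model(inp).sum()
with amp.scale_loss(loss, optimizer) as scaled_loss:  # scale loss to avoid fp16 underflow
    scaled_loss.backward()
optimizer.step()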
@quentinf00
quentinf00 / log_mem_cp.py
Created September 20, 2019 15:04
Log memory with checkpointing
def log_mem_cp(model, inp, mem_log=None, exp=None, cp_chunks=3):
    mem_log = mem_log or []
    exp = exp or f'exp_{len(mem_log)}'
    hr = []
    for idx, module in enumerate(model.modules()):
        add_memory_hooks(idx, module, mem_log, exp, hr)
    try:
        out = checkpoint_sequential(model, cp_chunks, inp)
        loss = out.sum()
        loss.backward()  # not shown in the truncated preview
    finally:
        [h.remove() for h in hr]  # assumed cleanup of the registered hooks
    return mem_log
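A hedged usage sketch, assuming seq_model, input and mem_log from the earlier snippets are in scope: log the checkpointed run next to the baseline so both experiments land in the same log.

try:
    mem_log.extend(log_mem_cp(seq_model, input, exp='checkpoint'))
except Exception as e:
    print(f'log_mem_cp failed because of {e}')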
@quentinf00
quentinf00 / pytorch_log_mem.py
Last active September 26, 2019 17:50
utils to log gpu memory
def _get_gpu_mem(synchronize=True, empty_cache=True):
    return torch.cuda.memory_allocated(), torch.cuda.memory_cached()

def _generate_mem_hook(handle_ref, mem, idx, hook_type, exp):
    def hook(self, *args):
        if len(mem) == 0 or mem[-1]["exp"] != exp:
            call_idx = 0
        else:
            call_idx = mem[-1]["call_idx"] + 1  # assumed continuation; preview truncated here
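The add_memory_hooks helper called by the snippets above is cut off from this preview. A hedged reconstruction of what it plausibly does: build one hook per phase with _generate_mem_hook, register it on the module, and keep the handles so they can be removed afterwards.

def add_memory_hooks(idx, module, mem_log, exp, hr):
    h = module.register_forward_pre_hook(_generate_mem_hook(hr, mem_log, idx, 'pre', exp))
    hr.append(h)
    h = module.register_forward_hook(_generate_mem_hook(hr, mem_log, idx, 'fwd', exp))
    hr.append(h)
    h = module.register_backward_hook(_generate_mem_hook(hr, mem_log, idx, 'bwd', exp))
    hr.append(h)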
from s3_api_raw import S3ApiRaw
from s3_model import S3Model
from schema import Schema

# We define the model
class User(S3Model):
    """
    User:
    - name: "user"
    - schema:
# Then, create a folder for your app:
mkdir my-sls-rest && cd my-sls-rest

# Create a virtualenv
virtualenv venv --python=python3.6
source venv/bin/activate

# Create a serverless project
serverless create --template aws-python3 --name my-sls-rest
serverless plugin install -n serverless-python-requirements
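The aws-python3 template generates its own handler.py; purely as an illustration of the kind of function the service will expose, a minimal hedged handler sketch (the file and function names are illustrative and should match your serverless.yml).

import json

def hello(event, context):
    body = {"message": "my-sls-rest is up and running"}
    return {"statusCode": 200, "body": json.dumps(body)}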