Kerem Turgutlu (KeremTurgutlu)

View reddit_comments.tsv
TsvHttpData-1.0
https://files.pushshift.io/reddit/comments/RC_2005-12.zst
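The file is a TsvHttpData-1.0 URL list (the manifest format used by Google Cloud Storage Transfer Service) pointing at a pushshift comment dump. As a side note, such an RC_*.zst dump can be stream-decoded line by line with the zstandard package; a minimal sketch, not part of the gist, assuming the file has been downloaded locally:

# Illustrative only: stream-decode a pushshift RC_*.zst dump into JSON records.
# Assumes `pip install zstandard`.
import io, json
import zstandard as zstd

def iter_comments(path):
    dctx = zstd.ZstdDecompressor(max_window_size=2**31)  # large window needed for newer dumps
    with open(path, "rb") as fh, dctx.stream_reader(fh) as reader:
        for line in io.TextIOWrapper(reader, encoding="utf-8"):
            yield json.loads(line)

for i, comment in enumerate(iter_comments("RC_2005-12.zst")):
    print(comment.get("body", "")[:80])
    if i >= 4:
        break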
KeremTurgutlu / ema_swa.py
Last active July 26, 2022 03:10
EMA and SWA callbacks for different model averaging techniques
from fastai.vision.all import *

__all__ = ["EMA", "SWA"]

class EMA(Callback):
    "https://fastai.github.io/timmdocs/training_modelEMA"
    order,run_valid = 5,False
    def __init__(self, decay=0.9999):
        super().__init__()
        self.decay = decay  # decay factor for the exponential moving average of the weights
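The preview is cut off before the update step. For context only, and not the gist's implementation, the weight-EMA update that the timm ModelEMA docs describe looks roughly like this in plain PyTorch:

# Minimal sketch of a weight EMA (illustrative, not the gist's callback).
import copy
import torch

class WeightEMA:
    def __init__(self, model, decay=0.9999):
        self.decay = decay
        self.ema_model = copy.deepcopy(model).eval()   # frozen shadow copy of the model
        for p in self.ema_model.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # ema = decay * ema + (1 - decay) * current, applied to params and buffers
        for ema_p, p in zip(self.ema_model.state_dict().values(),
                            model.state_dict().values()):
            if ema_p.dtype.is_floating_point:
                ema_p.mul_(self.decay).add_(p, alpha=1 - self.decay)
            else:
                ema_p.copy_(p)

In a fastai Callback this update would typically run in after_batch; for the SWA counterpart, PyTorch ships torch.optim.swa_utils.AveragedModel, which averages checkpoints with equal weights rather than an exponential decay.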
View train_sam.py
from fastai.vision.all import *
from torch.cuda.amp import autocast, GradScaler
from torch.cuda.amp.grad_scaler import _refresh_per_optimizer_state
from sam import SAM

class FastaiSched:
    "Drive the optimizer LR with a fastai schedule: 10% linear warmup to max_lr, then cosine anneal."
    def __init__(self, optimizer, max_lr):
        self.optimizer = optimizer
        self.lr_sched = combine_scheds([0.1,0.9], [SchedLin(1e-8,max_lr), SchedCos(max_lr,1e-8)])
        self.update(0)
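The class above feeds a fastai-style schedule into a SAM optimizer imported via `from sam import SAM`. Assuming the widely used davda54/sam package behind that import, the characteristic two-pass SAM update looks roughly like this (a sketch, not the gist's training loop):

# Sketch of a SAM training step. Assumes the davda54/sam package API:
# SAM(params, base_optimizer, ...) with first_step()/second_step().
import torch
from sam import SAM  # assumed to be https://github.com/davda54/sam

def train_step(model, xb, yb, optimizer: SAM, loss_fn=torch.nn.functional.cross_entropy):
    # 1st pass: compute the loss at the current weights and climb to the worst-case nearby point
    loss_fn(model(xb), yb).backward()
    optimizer.first_step(zero_grad=True)

    # 2nd pass: recompute the loss at the perturbed weights and take the actual update step
    loss_fn(model(xb), yb).backward()
    optimizer.second_step(zero_grad=True)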
KeremTurgutlu / zero_training.py
Created March 11, 2021 01:53
ZeRO optimizer example
import wandb
from fastai.callback.wandb import WandbCallback
from fastai.distributed import *
torch.backends.cudnn.benchmark = True
from zero_optimizer import ZeroRedundancyOptimizer
@patch
def after_batch(self: WandbCallback):
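The gist imports ZeroRedundancyOptimizer from a local zero_optimizer module; current PyTorch ships the same class as torch.distributed.optim.ZeroRedundancyOptimizer. A minimal usage sketch under DDP (illustrative, not the gist's script):

# Sketch: shard optimizer state across DDP ranks with ZeroRedundancyOptimizer.
# Assumes torch.distributed is already initialised (e.g. via torchrun).
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.optim import ZeroRedundancyOptimizer

def build(model: torch.nn.Module, lr: float = 1e-3):
    local_rank = dist.get_rank() % torch.cuda.device_count()
    model = DDP(model.cuda(local_rank), device_ids=[local_rank])
    # Each rank keeps only its shard of the Adam state, cutting optimizer memory roughly by world_size.
    opt = ZeroRedundancyOptimizer(model.parameters(),
                                  optimizer_class=torch.optim.Adam,
                                  lr=lr)
    return model, opt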
KeremTurgutlu / distributed_wandb.py
Created March 2, 2021 22:54
Fastai WANDB Callback with DDP
@call_parse
def main(
    size:   Param("Image resolution", int)=224,
    bs:     Param("Batch Size", int)=128,
    epochs: Param("Number of epochs for training", int)=1,
    lr:     Param("Learning rate for training", float)=5e-5):

    WANDB = True
    # start wandb
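The preview stops right before the wandb setup. A common pattern when combining wandb logging with DDP, shown here only as a generic sketch with a placeholder project name, is to initialise wandb on rank 0 only:

# Sketch: only the rank-0 process talks to wandb under DDP (illustrative).
import os
import wandb

def maybe_init_wandb(project="my-project", config=None):
    # torchrun / torch.distributed.launch export RANK for every process
    if int(os.environ.get("RANK", 0)) == 0:
        return wandb.init(project=project, config=config)
    return None

run = maybe_init_wandb(config={"bs": 128, "lr": 5e-5})
if run is not None:
    wandb.log({"status": "started"})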
View basic_batch_all_gather.py
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import random
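The preview shows only the imports. The primitive the filename refers to, torch.distributed.all_gather, can be wrapped roughly like this (a sketch; assumes the default process group is already initialised):

# Sketch: gather a per-rank tensor onto every rank with all_gather.
import torch
import torch.distributed as dist

def gather_batch(local: torch.Tensor) -> torch.Tensor:
    world_size = dist.get_world_size()
    buffers = [torch.zeros_like(local) for _ in range(world_size)]
    dist.all_gather(buffers, local)          # no gradient flows through all_gather
    buffers[dist.get_rank()] = local         # keep the autograd-connected local copy
    return torch.cat(buffers, dim=0)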
KeremTurgutlu / ddp_batch_all_gather.py
Last active September 20, 2023 00:57
Debugging: Distributed InfoNCE Loss
# The CLIP contrastive loss is computed against negative batch samples gathered from all GPUs.
# How can that be implemented?
# For more info: https://github.com/openai/CLIP/issues/29
import os
import sys
import tempfile
import torch
import torch.distributed as dist
import torch.nn as nn
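Building on the comment above and the linked CLIP issue, one way to compute an InfoNCE loss with negatives from every GPU is to gather both feature sets across ranks and score the local features against the full gathered batch. An illustrative sketch, not the gist's solution:

# Sketch: CLIP-style InfoNCE with negatives gathered from all DDP ranks.
import torch
import torch.distributed as dist
import torch.nn.functional as F

def all_gather_with_grad(x: torch.Tensor) -> torch.Tensor:
    out = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
    dist.all_gather(out, x)
    out[dist.get_rank()] = x          # re-insert the local tensor so its gradient survives
    return torch.cat(out, dim=0)

def distributed_info_nce(img_feats, txt_feats, logit_scale):
    # img_feats, txt_feats: (local_bs, d), already L2-normalised
    all_img = all_gather_with_grad(img_feats)
    all_txt = all_gather_with_grad(txt_feats)

    logits_i = logit_scale * img_feats @ all_txt.t()   # (local_bs, global_bs)
    logits_t = logit_scale * txt_feats @ all_img.t()

    # positives sit on the diagonal block owned by this rank
    offset = dist.get_rank() * img_feats.size(0)
    labels = torch.arange(img_feats.size(0), device=img_feats.device) + offset
    return 0.5 * (F.cross_entropy(logits_i, labels) + F.cross_entropy(logits_t, labels))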
View elr_plus.py
from fastai.vision.all import *
from torch.distributions import Beta
from copy import deepcopy

__all__ = ["ELR", "ELR_plusA", "ELR_plusB"]

class ELR(Callback):
    '''
    The selected values are β = 0.7 and λ = 3 for symmetric noise, β = 0.9 and λ = 1 for
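The docstring is cut off mid-sentence; the β and λ values refer to the early-learning regularisation (ELR) objective of Liu et al. (2020). A rough sketch of that regulariser as a standalone loss, based only on a reading of the paper and not on the gist's fastai callback:

# Sketch of the ELR objective: keep a running (EMA) target per training sample and add
# lambda * log(1 - <p, target>) to the cross-entropy, which pulls predictions toward the
# early-learning targets and counteracts memorisation of noisy labels. Illustrative only.
import torch
import torch.nn.functional as F

class ELRLoss(torch.nn.Module):
    def __init__(self, num_samples, num_classes, beta=0.7, lam=3.0):
        super().__init__()
        self.register_buffer("targets", torch.zeros(num_samples, num_classes))
        self.beta, self.lam = beta, lam

    def forward(self, logits, labels, idx):
        pred = F.softmax(logits, dim=1).clamp(1e-4, 1.0 - 1e-4)
        with torch.no_grad():
            # temporal ensembling: running average of normalised predictions per sample
            y = pred / pred.sum(dim=1, keepdim=True)
            self.targets[idx] = self.beta * self.targets[idx] + (1 - self.beta) * y
        ce = F.cross_entropy(logits, labels)
        elr = torch.log(1.0 - (self.targets[idx] * pred).sum(dim=1)).mean()
        return ce + self.lam * elr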
KeremTurgutlu / models.py
Created May 6, 2019 07:17
Ventricle Models
from fastai.vision import *
import math

__all__ = ['MeshNet', 'VolumetricUnet', 'conv_relu_bn_drop', 'res3dmodel', 'get_total_params',
           'VolumetricResidualUnet', 'model_dict', 'experiment_model_dict', 'one_by_one_conv',
           'model_split_dict']

####################
##   GET MODELS   ##
View cp_decomposition
import numpy as np

def construct(A, B, C):
    """
    Given factor matrices A, B, C, construct the 3D tensor they represent.
        A : (i, r)
        B : (j, r)
        C : (k, r)
    """
    X_tilde = 0
    r = A.shape[1]
    for i in range(r):
        # sum of r rank-one tensors: the outer product a_i ∘ b_i ∘ c_i
        X_tilde = X_tilde + A[:, i][:, None, None] * B[:, i][None, :, None] * C[:, i][None, None, :]
    return X_tilde
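As a quick sanity check (not part of the gist, using random factor matrices for illustration), the same sum of rank-one tensors can be written as a single einsum over the factor matrices:

# Verify the loop-based CP reconstruction against an equivalent einsum.
import numpy as np

A, B, C = np.random.rand(4, 3), np.random.rand(5, 3), np.random.rand(6, 3)
X_loop   = construct(A, B, C)
X_einsum = np.einsum('ir,jr,kr->ijk', A, B, C)
assert np.allclose(X_loop, X_einsum)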