Kerem Turgutlu (KeremTurgutlu)
KeremTurgutlu / unet_down
Created April 19, 2018 01:42
UNet down block in PyTorch
# a sample down block
import torch.nn as nn

def make_conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=1):
    return [
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    ]

# inside a module's __init__: two conv-bn-relu stacks form one down block
self.down1 = nn.Sequential(
    *make_conv_bn_relu(in_channels, 64, kernel_size=3, stride=1, padding=1),
    *make_conv_bn_relu(64, 64, kernel_size=3, stride=1, padding=1),
)
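For context, a down block like this is typically followed by max pooling in a UNet encoder. A minimal sketch of one way to wire it up, assuming make_conv_bn_relu from above is in scope (SimpleEncoder and in_channels=3 are illustrative names, not from the gist):

import torch.nn as nn

# hypothetical encoder stage built from the down block above
class SimpleEncoder(nn.Module):
    def __init__(self, in_channels=3):
        super().__init__()
        self.down1 = nn.Sequential(
            *make_conv_bn_relu(in_channels, 64),
            *make_conv_bn_relu(64, 64),
        )
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        x = self.down1(x)    # (N, 64, H, W)
        return self.pool(x)  # (N, 64, H/2, W/2)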
KeremTurgutlu / unet_up
Created April 19, 2018 01:45
UNet up block in PyTorch
# a sample up block
import torch.nn as nn

def make_conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=1):
    return [
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    ]

# inside a module's __init__: 128 in-channels = 64 skip + 64 upsampled, concatenated
self.up4 = nn.Sequential(
    *make_conv_bn_relu(128, 64, kernel_size=3, stride=1, padding=1),
    *make_conv_bn_relu(64, 64, kernel_size=3, stride=1, padding=1),
)
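A decoder step clarifies why up4 takes 128 input channels: a 64-channel skip connection is concatenated with 64 upsampled channels. A minimal sketch (decode_step is a hypothetical helper, not from the gist):

import torch
import torch.nn.functional as F

# upsample deeper features, concatenate the encoder skip, then refine with up4
def decode_step(up4, x_deep, x_skip):
    x = F.interpolate(x_deep, scale_factor=2, mode='nearest')  # (N, 64, H, W)
    x = torch.cat([x_skip, x], dim=1)                          # (N, 64+64, H, W)
    return up4(x)                                              # (N, 64, H, W)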
KeremTurgutlu / nn_interpolate.py
Last active May 22, 2023 18:19
Nearest Neighbor Interpolation in NumPy
import numpy as np

def nn_interpolate(A, new_size):
    """
    Nearest Neighbor Interpolation, Step by Step
    """
    # get sizes
    old_size = A.shape
    # calculate row and column ratios
    row_ratio, col_ratio = np.array(new_size) / np.array(old_size)
    # map each output position back to its nearest source index
    new_row_positions = np.floor(np.arange(new_size[0]) / row_ratio).astype(int)
    new_col_positions = np.floor(np.arange(new_size[1]) / col_ratio).astype(int)
    # index the source array with the mapped positions
    return A[new_row_positions][:, new_col_positions]
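A quick check of the function above: upscaling a 2x2 array to 4x4 should repeat each pixel in a 2x2 block.

import numpy as np

A = np.array([[1, 2],
              [3, 4]])
print(nn_interpolate(A, (4, 4)))
# [[1 1 2 2]
#  [1 1 2 2]
#  [3 3 4 4]
#  [3 3 4 4]]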
KeremTurgutlu / cp_decomposition
import numpy as np

def construct(A, B, C):
    """
    Given matrices A, B, C, construct the 3D tensor
    X_tilde[i,j,k] = sum_r A[i,r] * B[j,r] * C[k,r]
    A : i x r
    B : j x r
    C : k x r
    """
    X_tilde = 0
    r = A.shape[1]
    for i in range(r):
        # add the rank-1 tensor given by the outer product of the i-th columns
        X_tilde = X_tilde + np.einsum('i,j,k->ijk', A[:, i], B[:, i], C[:, i])
    return X_tilde
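A sanity check for construct: the loop of rank-1 outer products should match a single einsum contraction over the shared rank index (the shapes here are arbitrary examples):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 2))
B = rng.normal(size=(4, 2))
C = rng.normal(size=(5, 2))
X = construct(A, B, C)
# same CP reconstruction in one call: X[i,j,k] = sum_r A[i,r] * B[j,r] * C[k,r]
assert np.allclose(X, np.einsum('ir,jr,kr->ijk', A, B, C))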
KeremTurgutlu / models.py
Created May 6, 2019 07:17
Ventricle Models
from fastai.vision import *
import math

__all__ = ['MeshNet', 'VolumetricUnet', 'conv_relu_bn_drop', 'res3dmodel', 'get_total_params',
           'VolumetricResidualUnet', 'model_dict', 'experiment_model_dict', 'one_by_one_conv',
           'model_split_dict']

####################
##   GET MODELS   ##
####################
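The preview cuts off before the helper definitions. As a rough idea only, a conv_relu_bn_drop for volumetric inputs might look like the hypothetical sketch below; the real body, kernel sizes, and layer order are not shown in the gist, and the ordering here simply follows the function name:

import torch.nn as nn

# hypothetical reconstruction of the conv_relu_bn_drop helper named in __all__
def conv_relu_bn_drop(in_c, out_c, ks=3, stride=1, padding=1, p=0.0):
    return nn.Sequential(
        nn.Conv3d(in_c, out_c, ks, stride=stride, padding=padding, bias=False),
        nn.ReLU(inplace=True),
        nn.BatchNorm3d(out_c),
        nn.Dropout3d(p),
    )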
KeremTurgutlu / elr_plus.py
from fastai.vision.all import *
from torch.distributions import Beta
from copy import deepcopy

__all__ = ["ELR", "ELR_plusA", "ELR_plusB"]

class ELR(Callback):
    '''
    The selected values are β = 0.7 and λ = 3 for symmetric noise,
    and β = 0.9 and λ = 1 for asymmetric noise.
    '''
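For context, the β and λ in the docstring come from early-learning regularization (Liu et al., 2020): keep a per-sample EMA of past predictions and penalize log(1 - <p, t>). A minimal sketch of the loss term, not the fastai callback itself; target_ema and idx are assumed bookkeeping (an (n_samples, n_classes) buffer and the batch's dataset indices):

import torch
import torch.nn.functional as F

def elr_loss(logits, labels, target_ema, idx, beta=0.7, lam=3.0):
    p = F.softmax(logits, dim=1)
    # EMA of past predictions is the regularization target (β from the docstring)
    target_ema[idx] = beta * target_ema[idx] + (1 - beta) * p.detach()
    ce = F.cross_entropy(logits, labels)
    # pull predictions toward the early-learning target: log(1 - <p, t>)
    reg = (1.0 - (p * target_ema[idx]).sum(dim=1)).log().mean()
    return ce + lam * reg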
KeremTurgutlu / ddp_batch_all_gather.py
Last active September 20, 2023 00:57
Debugging: Distributed InfoNCE Loss
# CLIP's contrastive loss is computed against negative samples drawn from the batches on all GPUs.
# How do we implement that?
# For more info: https://github.com/openai/CLIP/issues/29
import os
import sys
import tempfile
import torch
import torch.distributed as dist
import torch.nn as nn
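The usual answer to the question in the comments, sketched below: dist.all_gather returns tensors without gradient history, so the local chunk is swapped back in before computing logits. gather_with_grad and info_nce are illustrative names, assuming an initialized process group and equal per-rank batch sizes:

import torch
import torch.distributed as dist
import torch.nn.functional as F

def gather_with_grad(x):
    gathered = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, x)
    gathered[dist.get_rank()] = x  # re-insert the differentiable local tensor
    return torch.cat(gathered, dim=0)

def info_nce(img_emb, txt_emb, temperature=0.07):
    all_txt = gather_with_grad(txt_emb)
    logits = img_emb @ all_txt.t() / temperature
    # positives sit on the diagonal, offset by this rank's position in the global batch
    labels = torch.arange(len(img_emb), device=img_emb.device) + dist.get_rank() * len(img_emb)
    return F.cross_entropy(logits, labels)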
KeremTurgutlu / basic_batch_all_gather.py
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import random
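A minimal runnable sketch in the spirit of this file (the port and world size are arbitrary choices): spawn two CPU processes with the gloo backend and all_gather a per-rank tensor.

import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process

def run(rank, world_size):
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group('gloo', rank=rank, world_size=world_size)
    x = torch.full((2,), float(rank))
    out = [torch.zeros(2) for _ in range(world_size)]
    dist.all_gather(out, x)
    print(rank, torch.cat(out))  # every rank prints tensor([0., 0., 1., 1.])
    dist.destroy_process_group()

if __name__ == '__main__':
    procs = [Process(target=run, args=(r, 2)) for r in range(2)]
    for p in procs: p.start()
    for p in procs: p.join()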
KeremTurgutlu / distributed_wandb.py
Created March 2, 2021 22:54
Fastai WANDB Callback with DDP
@call_parse
def main(
    size: Param("Image resolution", int) = 224,
    bs: Param("Batch Size", int) = 128,
    epochs: Param("Number of epochs for training", int) = 1,
    lr: Param("Learning rate for training", float) = 5e-5,
):
    WANDB = True
    # start wandb
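The standard pattern for W&B under DDP, sketched below, is to initialize a run and attach the callback only on rank 0 so worker processes don't create duplicate runs (the project name is made up, and the callback arguments are one reasonable choice, not necessarily the gist's):

import wandb
from fastai.callback.wandb import WandbCallback
from fastai.torch_core import rank_distrib

if WANDB and rank_distrib() == 0:
    wandb.init(project="ddp-training")
    cbs = [WandbCallback(log_preds=False)]
else:
    cbs = []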
KeremTurgutlu / zero_training.py
Created March 11, 2021 01:53
ZeRO optimizer example
import torch
import wandb
from fastai.callback.wandb import WandbCallback
from fastai.distributed import *
from zero_optimizer import ZeroRedundancyOptimizer

torch.backends.cudnn.benchmark = True

@patch
def after_batch(self: WandbCallback):
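Note that ZeroRedundancyOptimizer has shipped in torch.distributed.optim since PyTorch 1.8, so a local zero_optimizer copy is no longer needed. A minimal sketch, assuming an initialized process group and a DDP-wrapped model:

import torch
from torch.distributed.optim import ZeroRedundancyOptimizer

# shard optimizer state across ranks instead of replicating it on every GPU
opt = ZeroRedundancyOptimizer(
    model.parameters(),
    optimizer_class=torch.optim.Adam,
    lr=5e-5,
)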