Eugene Khvedchenya (BloodAxe)

from collections import OrderedDict
from functools import partial
from typing import Union, List, Dict, Tuple, Type

from pytorch_toolbelt.modules import (
    conv1x1,
    UnetBlock,
    ACT_RELU,
    ABN,
    ACT_SWISH,
)

import torch
from torch import nn


def main():
    # Enable anomaly detection globally for the whole run
    torch.autograd.set_detect_anomaly(True)
    ...
    # Rest of the training code


# OR: enable it only around the numerically suspicious computation
class MyNumericallyUnstableLoss(nn.Module):
    ...
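
# A minimal sketch of the scoped alternative hinted at above (the loss body is an
# assumption, not the author's code): run only the fragile computation under
# torch.autograd.detect_anomaly(), so its ops record forward tracebacks that autograd
# can report if the backward pass later fails, without slowing down the rest of training.
class NumericallyUnstableLossSketch(nn.Module):
    def forward(self, input, target):
        with torch.autograd.detect_anomaly():
            # Intentionally fragile placeholder: log() of non-positive values yields NaN/-inf
            loss = torch.log(input - target).mean()
        return loss
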
# https://github.com/BloodAxe/Kaggle-2020-Alaska2
callbacks += [
    CriterionCallback(
        input_key=INPUT_TRUE_MODIFICATION_FLAG,
        output_key=OUTPUT_PRED_MODIFICATION_FLAG,
        criterion_key="bce",
    ),
    CriterionCallback(
        input_key=INPUT_TRUE_MODIFICATION_TYPE,
        output_key=OUTPUT_PRED_MODIFICATION_TYPE,
        criterion_key="ce",  # assumed key for the modification-type criterion
    ),
]
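
# A hedged sketch (not taken from the repository) of how criterion_key is resolved:
# Catalyst looks each key up in a criterion dictionary passed to the runner, so one run
# can combine a binary loss for the flag head and a multi-class loss for the type head.
criterion = {
    "bce": nn.BCEWithLogitsLoss(),
    "ce": nn.CrossEntropyLoss(),
}
# ...later passed along the lines of: runner.train(criterion=criterion, callbacks=callbacks, ...)
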
# https://github.com/BloodAxe/Kaggle-2020-Alaska2/blob/master/alaska2/dataset.py#L373
from typing import List, Optional, Union
import albumentations as A
import numpy as np
from torch.utils.data import Dataset


class TrainingValidationDataset(Dataset):
    def __init__(
        self,
        images: Union[List, np.ndarray],
        targets: Optional[Union[List, np.ndarray]],
        quality: Union[List, np.ndarray],
        bits: Optional[Union[List, np.ndarray]],
        transform: Union[A.Compose, A.BasicTransform],
        features: List[str],
    ):
        ...
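
# Hypothetical instantiation (the variable names and feature key are made up) just to show
# the call shape: every input the dataset needs is passed explicitly and typed, so train and
# validation splits are constructed the same way.
train_ds = TrainingValidationDataset(
    images=train_image_fnames,
    targets=train_labels,
    quality=train_jpeg_quality,
    bits=None,
    transform=A.Compose([A.HorizontalFlip(p=0.5), A.Normalize()]),
    features=["rgb"],
)
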
# https://github.com/BloodAxe/Kaggle-2020-Alaska2/blob/master/alaska2/models/timm.py#L104
def forward(self, **kwargs):
    x = kwargs[self.input_key]
    x = self.rgb_bn(x)
    x = self.encoder.forward_features(x)
    embedding = self.pool(x)
    result = {
        OUTPUT_PRED_MODIFICATION_FLAG: self.flag_classifier(self.drop(embedding)),
        OUTPUT_PRED_MODIFICATION_TYPE: self.type_classifier(self.drop(embedding)),
    }
    return result
# Good practice: return a dict keyed by stable, named outputs
class RetinaNet(nn.Module):
    RETINA_NET_OUTPUT_BBOXES = "bboxes"
    RETINA_NET_OUTPUT_SCORES = "scores"
    ...

    def forward(self, image):
        x = self.encoder(image)
        x = self.decoder(x)
        bboxes, scores = self.head(x)
        return {
            self.RETINA_NET_OUTPUT_BBOXES: bboxes,
            self.RETINA_NET_OUTPUT_SCORES: scores,
        }

# Bad practice, don't return tuple
class RetinaNet(nn.Module):
    ...

    def forward(self, image):
        x = self.encoder(image)
        x = self.decoder(x)
        bboxes, scores = self.head(x)
        return bboxes, scores
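
# Sketch of why the dict form ages better (model and image_batch are assumed variables,
# and this uses the dict-returning RetinaNet above): callers address outputs by key, so
# adding another output later does not silently break positional unpacking the way the
# tuple-returning version does.
outputs = model(image_batch)
bboxes = outputs[RetinaNet.RETINA_NET_OUTPUT_BBOXES]
scores = outputs[RetinaNet.RETINA_NET_OUTPUT_SCORES]
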
class MySegmentationDataset(Dataset):
    ...

    def __getitem__(self, index):
        image = cv2.imread(self.images[index])
        target = cv2.imread(self.masks[index])

        # No data normalization and type casting here
        return (
            torch.from_numpy(image).permute(2, 0, 1).contiguous(),
            torch.from_numpy(target).permute(2, 0, 1).contiguous(),
        )
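
# A minimal sketch of the counterpart to the comment above (the loader and loop variables
# are assumptions): keep tensors as uint8 in the dataset and do the float cast plus
# normalization once the batch is already on the GPU, instead of per-sample in DataLoader workers.
for images, targets in train_loader:
    images = images.cuda(non_blocking=True).float() / 255.0
    targets = targets.cuda(non_blocking=True).float() / 255.0
    ...
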
from tqdm import tqdm


class RAMDataset(Dataset):
    def __init__(self, image_fnames, targets):
        self.targets = targets
        self.images = []
        for fname in tqdm(image_fnames, desc="Loading files in RAM"):
            with open(fname, "rb") as f:
                self.images.append(f.read())

    def __len__(self):
        return len(self.targets)
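    # A possible __getitem__ for the class above (the decode step is an assumption, not the
    # author's code; it assumes cv2 and numpy as np are imported): the files are kept in RAM
    # as still-encoded bytes and decoded on demand, so memory holds compressed data rather
    # than full-size arrays.
    def __getitem__(self, index):
        buffer = np.frombuffer(self.images[index], dtype=np.uint8)
        image = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        return image, self.targets[index]
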
def test_loss_profiling():
    loss = nn.BCEWithLogitsLoss()
    with torch.autograd.profiler.profile(use_cuda=True) as prof:
        input = torch.randn((8, 1, 128, 128)).cuda()
        input.requires_grad = True
        # Binary targets; upper bound 2 so labels are 0/1 rather than all zeros
        target = torch.randint(2, (8, 1, 128, 128)).cuda().float()

        for i in range(10):
            l = loss(input, target)
            l.backward()

    # Report aggregated per-op timings collected by the profiler
    print(prof.key_averages().table(sort_by="self_cpu_time_total"))