@rsomani95
rsomani95 / timm_ghostnet_yolox.py
Last active July 20, 2022 06:08
Example of how to load a `timm` architecture with the YOLOX experiment setup. In this file, we're looking specifically at `ghostnet_100`, but this extends to any other architecture in `timm` that supports the `features_only` interface.
import timm
import torch
import torch.distributed as dist
import torch.nn as nn
from upyog.imports import *
from yolox.exp.yolox_base import Exp as DefaultBaseExp
from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
from yolox.utils import get_local_rank, wait_for_the_master
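The preview stops at the imports; the core trick the description points at is timm's `features_only` mode, which returns per-stage feature maps instead of classification logits. A minimal sketch follows -- the `out_indices` choice and the wiring into `YOLOPAFPN`/`YOLOXHead` are assumptions about the truncated file:

# Sketch: a GhostNet backbone that emits pyramid features for a YOLOX neck.
# out_indices=(2, 3, 4) selects the stride-8/16/32 stages (assumption --
# the gist's Exp subclass does the actual YOLOPAFPN/YOLOXHead wiring).
backbone = timm.create_model(
    "ghostnet_100", pretrained=True, features_only=True, out_indices=(2, 3, 4)
)
print(backbone.feature_info.channels())       # per-stage channel widths
feats = backbone(torch.rand(1, 3, 640, 640))  # list of 3 feature maps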
@rsomani95
rsomani95 / pil_operators.py
Last active May 21, 2021 01:51
Operators for PIL Images to easily create grid views and view multiple images side by side.
import fastcore.all as fastcore
import PIL

@fastcore.patch
def __or__(self: PIL.Image.Image, other: PIL.Image.Image):
    "Horizontally stack two PIL Images"
    assert isinstance(other, PIL.Image.Image)
    widths, heights = zip(*(i.size for i in [self, other]))
    new_img = PIL.Image.new("RGB", (sum(widths), max(heights)))
    # (completion -- the preview is truncated) paste side by side and return
    new_img.paste(self, (0, 0))
    new_img.paste(other, (widths[0], 0))
    return new_img
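With the patch applied, horizontal stacking is just the `|` operator:

a = PIL.Image.new("RGB", (64, 64), "red")
b = PIL.Image.new("RGB", (48, 48), "blue")
grid = a | b  # 112x64 canvas: `a` on the left, `b` top-aligned on the right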
@rsomani95
rsomani95 / apply_lut.py
Last active May 27, 2021 07:32
Applying LUTs with PIL
from PIL import Image, ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True

img_path = "image.jpg"
lut_path = "lut.cube"

img = Image.open(img_path)  # .convert("RGB")
lut = read_lut(lut_path)  # `read_lut` is defined further down in the gist (preview truncated)

# This returns a PIL Image with the LUT applied
# (assuming `read_lut` yields an ImageFilter.Color3DLUT, which Image.filter accepts)
img_out = img.filter(lut)
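If `read_lut` isn't at hand, the `pillow-lut` package ships a ready-made `.cube` parser that produces the same kind of filter; a minimal alternative sketch:

# pip install pillow-lut
from PIL import Image
from pillow_lut import load_cube_file

lut = load_cube_file("lut.cube")  # -> PIL.ImageFilter.Color3DLUT
img_out = Image.open("image.jpg").convert("RGB").filter(lut)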
import torch, torchvision
from onnxruntime import InferenceSession

model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.eval()  # detection models must be in eval mode to be traced for export
x = torch.rand(1, 3, 360, 640)

torch.onnx.export(
    model,
    x,  # ONNX requires a fixed input size
    "test-mask-rcnn-export.onnx",
    opset_version=11,  # Mask R-CNN export needs opset >= 11
)
@rsomani95
rsomani95 / decode_video.py
Created January 8, 2021 10:58
Quickly decode video frame by frame
# !pip install av  (the PyAV project publishes to PyPI as "av")
import av

PATH_VIDEO = "..."

container = av.open(PATH_VIDEO)
video_stream = container.streams.video[0]
video_decoder = container.decode(video_stream)

for frame in video_decoder:
    img = frame.to_image()  # av.VideoFrame -> PIL.Image
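A PyAV-documented speedup worth pairing with this: turn on threaded decoding before iterating.

# Let FFmpeg use frame/slice threading (see the PyAV cookbook)
video_stream.thread_type = "AUTO"
# For numeric pipelines, frames also convert straight to arrays:
# arr = frame.to_ndarray(format="rgb24")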
@rsomani95
rsomani95 / draw_sample.py
Last active January 8, 2021 09:38
Icevision -- `draw_sample` with uniform color map and custom font
from icevision.visualize.draw_data import draw_bbox
# import a bunch of other stuff here -- json and numpy at minimum:
import json
import numpy as np

cmap_path = "zz_color_map_coco.json"
with open(cmap_path) as f:
    COLOR_MAP_COCO = json.load(f)
COLOR_MAP_COCO = {
    k: np.array(v).astype(float) for k, v in COLOR_MAP_COCO.items()
}
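Presumably the map then feeds `draw_bbox` a per-class color; a hedged sketch (the argument names are assumptions -- check your icevision version's signature):

# Hypothetical usage: `img`, `bbox`, and the "person" label are illustrative only
label = "person"
img = draw_bbox(img=img, bbox=bbox, color=COLOR_MAP_COCO[label])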
import torch.nn as nn
import torch
import torch.nn.functional as F

class FocalLoss(nn.Module):
    # implementation adapted from https://amaarora.github.io/2020/06/29/FocalLoss.html
    # paper: https://arxiv.org/abs/1708.02002 (forward reconstructed from the post; preview truncated)
    "Focal Loss"
    def __init__(self, alpha=.25, gamma=2, reduction='none', pos_weight=None):
        super(FocalLoss, self).__init__()
        self.alpha = torch.tensor([alpha, 1 - alpha])  # .cuda()
        self.gamma, self.reduction, self.pos_weight = gamma, reduction, pos_weight

    def forward(self, inputs, targets):
        bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none", pos_weight=self.pos_weight)
        at = self.alpha.to(inputs.device).gather(0, targets.long().view(-1)).view_as(targets)
        pt = torch.exp(-bce)  # probability the model assigns to the true class
        loss = at * (1 - pt) ** self.gamma * bce
        return loss.mean() if self.reduction == "mean" else loss.sum() if self.reduction == "sum" else loss
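A quick shape sanity check (multi-label logits against 0/1 targets; shapes here are just for the demo):

logits = torch.randn(4, 10)                     # batch of 4, 10 binary labels
targets = torch.randint(0, 2, (4, 10)).float()  # 0/1 ground truth
loss = FocalLoss(reduction="mean")(logits, targets)
print(loss)  # scalar tensor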
[['color.key.shot.na: 0.9225'],
['color.saturation.neutral: 0.9356'],
['color.theory.analagous: 0.8844'],
['color.tones.warm: 0.8524'],
['shot.angle.low: 0.8402'],
['shot.composition.center: 0.9619'],
['shot.focus.na: 0.7580'],
['shot.framing.wide: 0.8396'],
['shot.level.level: 0.9755'],
['shot.lighting.cast.hard: 0.8004'],
{'color_key': {'color.key.blue': 0.0003,
               'color.key.green': 0.0001,
               'color.key.luma': 0.0636,
               'color.key.matte': 0.0087,
               'color.key.shot.na': 0.9225,
               'color.key.texture.na': 0.0049},
 'color_saturation': {'color.saturation.desaturated': 0.0453,
                      'color.saturation.neutral': 0.9356,
                      'color.saturation.pastel': 0.001,
                      'color.saturation.saturated': 0.018},