Kulbear
@Kulbear
Kulbear / interpolate3d.py
Created June 21, 2022 22:29
Trilinear interpolation on a 3D regular grid, implemented with PyTorch.
import tensorflow as tf
import torch
import numpy as np
def gather_nd_torch(params, indices, batch_dim=1):
""" A PyTorch porting of tensorflow.gather_nd
This implementation can handle leading batch dimensions in params, see below for detailed explanation.
The majority of this implementation is from Michael Jungo @ https://stackoverflow.com/a/61810047/6670143
I just ported it compatible to leading batch dimension.
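For reference, PyTorch's built-in F.grid_sample performs the same trilinear lookup on a regular 3D grid when given 5D inputs; a minimal sketch with illustrative shapes, not the gist's code:

import torch
import torch.nn.functional as F

# Illustrative sketch: trilinear interpolation with the built-in grid_sample.
# volume holds values on a regular (D, H, W) grid; points are query coordinates
# normalized to [-1, 1] as grid_sample expects.
volume = torch.rand(1, 1, 8, 8, 8)           # (N, C, D, H, W)
points = torch.rand(1, 4, 1, 1, 3) * 2 - 1   # (N, D_out, H_out, W_out, 3)
# mode='bilinear' on a 5D input performs trilinear interpolation.
out = F.grid_sample(volume, points, mode='bilinear', align_corners=True)
print(out.shape)  # torch.Size([1, 1, 4, 1, 1])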
@Kulbear
Kulbear / gather_nd_pytorch.py
Created June 21, 2022 08:45
A PyTorch port of tensorflow.gather_nd with batch_dim support.
import torch
import tensorflow as tf
import time
import numpy as np
def gather_nd_torch(params, indices, batch_dim=1):
""" A PyTorch porting of tensorflow.gather_nd
This implementation can handle leading batch dimensions in params, see below for detailed explanation.
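A small example of the behaviour being ported, with a plain-indexing PyTorch equivalent for this simple case (toy shapes, assumed for illustration):

import tensorflow as tf
import torch

# Toy example of tf.gather_nd with a leading batch dimension:
# params (B, N, C), indices (B, K, 1) -> output (B, K, C).
params_tf = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))
indices_tf = tf.constant([[[0], [2]], [[1], [1]]])
out_tf = tf.gather_nd(params_tf, indices_tf, batch_dims=1)  # shape (2, 2, 4)

# The same gather written with plain PyTorch advanced indexing.
params_pt = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
indices_pt = torch.tensor([[0, 2], [1, 1]])
out_pt = params_pt[torch.arange(2).unsqueeze(1), indices_pt]  # shape (2, 2, 4)
print(out_tf.numpy(), out_pt.numpy(), sep='\n')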
@Kulbear
Kulbear / check_convex.py
Created December 22, 2019 18:51 — forked from mblondel/check_convex.py
A small script to get numerical evidence that a function is convex
# Authors: Mathieu Blondel, Vlad Niculae
# License: BSD 3 clause
import numpy as np
def _gen_pairs(gen, max_iter, max_inner, random_state, verbose):
    rng = np.random.RandomState(random_state)
    # if tuple, interpret as randn
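The idea of the script in one short, hedged sketch (simplified; the original handles generators of point pairs and more options):

import numpy as np

# Simplified sketch of the numerical convexity check: sample random point pairs
# and test the midpoint inequality f((x + y) / 2) <= (f(x) + f(y)) / 2,
# which holds for every pair when f is convex.
def probably_convex(f, dim, n_pairs=1000, seed=0, tol=1e-9):
    rng = np.random.RandomState(seed)
    for _ in range(n_pairs):
        x, y = rng.randn(dim), rng.randn(dim)
        if f((x + y) / 2) > (f(x) + f(y)) / 2 + tol:
            return False  # counterexample found: not convex
    return True  # only numerical evidence, not a proof

print(probably_convex(lambda v: v @ v, dim=3))     # True: squared norm is convex
print(probably_convex(lambda v: -(v @ v), dim=3))  # False: its negative is concave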
@Kulbear
Kulbear / 2048.py
Last active July 28, 2019 21:12
2048
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Authors: Ji Yang <jyang7@ualberta.ca>
# License: MIT
# Version: 1.0.0
# Last Updated: May 14, 2017
import random
import sys
class FocalLoss(nn.Module):
    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        target = target.float()
        max_val = (-logit).clamp(min=0)
        loss = logit - logit * target + max_val + \
               ((-max_val).exp() + (-logit - max_val).exp()).log()
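The preview above ends at the numerically stable BCE-with-logits term; a hedged sketch of how a binary focal loss is commonly completed from that term (not necessarily this gist's exact code):

import torch
import torch.nn as nn
import torch.nn.functional as F

class BinaryFocalLoss(nn.Module):
    """Common binary focal loss formulation; a sketch, not the gist's code."""
    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        target = target.float()
        # Numerically stable binary cross-entropy with logits (as in the preview).
        max_val = (-logit).clamp(min=0)
        bce = logit - logit * target + max_val + \
              ((-max_val).exp() + (-logit - max_val).exp()).log()
        # Focal modulation: weight each term by (1 - p_t) ** gamma, computed in log space.
        invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))  # log(1 - p_t)
        return ((invprobs * self.gamma).exp() * bce).mean()

loss = BinaryFocalLoss(gamma=2)(torch.randn(8), torch.randint(0, 2, (8,)))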
@Kulbear
Kulbear / model.py
Last active December 22, 2018 07:15
def get_senet50(config):
    model = se_resnext50_32x4d(pretrained='imagenet')
    # print(model.last_linear)
    # print(model.dropout)
    model.layer0.conv1 = nn.Conv2d(config.channels, 64, 3, stride=2, padding=1, bias=False)
    model.avg_pool = nn.AdaptiveAvgPool2d(1)
    model.dropout = None
    model.last_linear = nn.Sequential(
        nn.BatchNorm1d(2048),
        nn.Dropout(0.5),
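The same pattern (re-stem the first conv for a different input channel count, replace the classifier head) applies to any backbone; a self-contained sketch on a torchvision ResNet-50, not this gist's se_resnext50_32x4d:

import torch
import torch.nn as nn
from torchvision.models import resnet50

def adapt_resnet50(in_channels, num_classes):
    # weights=None gives random init (torchvision 0.13+ API); pass pretrained weights in real use.
    model = resnet50(weights=None)
    model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2,
                            padding=3, bias=False)
    model.fc = nn.Sequential(
        nn.BatchNorm1d(2048),
        nn.Dropout(0.5),
        nn.Linear(2048, num_classes),
    )
    return model

model = adapt_resnet50(in_channels=4, num_classes=28)
print(model(torch.randn(2, 4, 224, 224)).shape)  # torch.Size([2, 28])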
@Kulbear
Kulbear / gist:f58478d412e3f37b88dcd0521eb3abb8
Created November 8, 2018 18:27
multi_weighted_logloss.py
def multi_weighted_logloss(y_ohe, y_p):
"""
@author olivier https://www.kaggle.com/ogrellier
multi logloss for PLAsTiCC challenge
"""
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1-1e-15)
# Transform to log
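The preview stops right before the averaging; a hedged sketch of the class-weighted multi-class log loss this kind of metric computes (simplified, not necessarily the gist's exact code):

import numpy as np

# Sketch: average the log loss per class, then take a class-weight-weighted mean,
# so rare classes are not drowned out by frequent ones.
def weighted_logloss(y_ohe, y_p, class_weights):
    y_p = np.clip(y_p, 1e-15, 1 - 1e-15)
    per_class = -(y_ohe * np.log(y_p)).sum(axis=0) / y_ohe.sum(axis=0)
    w = np.asarray(class_weights, dtype=float)
    return float((per_class * w).sum() / w.sum())

# toy check: 3 samples, 2 classes
y_ohe = np.array([[1, 0], [0, 1], [1, 0]])
y_p = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
print(weighted_logloss(y_ohe, y_p, class_weights=[1, 2]))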
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None


class MyLinkedList:
    def __init__(self):
        """
        Initialize your data structure here.
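The preview ends at the constructor; an illustrative sketch of the usual operations such a list supports (not this gist's methods):

class SinglyLinkedList:
    def __init__(self):
        self.head = None
        self.size = 0

    def add_at_head(self, val):
        node = Node(val)          # reuses the Node class shown above
        node.next = self.head
        self.head = node
        self.size += 1

    def get(self, index):
        if index < 0 or index >= self.size:
            return -1
        cur = self.head
        for _ in range(index):
            cur = cur.next
        return cur.val

lst = SinglyLinkedList()
lst.add_at_head(2)
lst.add_at_head(1)
print(lst.get(0), lst.get(1))  # 1 2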
@Kulbear
Kulbear / autoencoder.py
Created February 18, 2018 22:57 — forked from gabrieleangeletti/autoencoder.py
Denoising Autoencoder implementation using TensorFlow.
import tensorflow as tf
import numpy as np
import os
import zconfig
import utils
class DenoisingAutoencoder(object):
""" Implementation of Denoising Autoencoders using TensorFlow.
@Kulbear
Kulbear / run.py
Last active October 8, 2017 21:42
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import numpy as np
import timeit
import tensorflow as tf
from pprint import pformat
mnist = read_data_sets("data", one_hot=False)
NUM_CLASS = 10
STEP = 200
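read_data_sets lives in the long-removed tf.contrib; an equivalent way to load the same data with current TensorFlow (a sketch, not part of the gist):

import tensorflow as tf

# Modern replacement for the contrib MNIST loader: flat 784-dim float features in [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
print(x_train.shape, y_train.shape)  # (60000, 784) (60000,)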