
Kulbear

😎
Happy Coding All the Day!
Kulbear / check_convex.py
Created Dec 22, 2019 — forked from mblondel/check_convex.py
A small script to get numerical evidence that a function is convex
# Authors: Mathieu Blondel, Vlad Niculae
# License: BSD 3 clause

import numpy as np

def _gen_pairs(gen, max_iter, max_inner, random_state, verbose):
    rng = np.random.RandomState(random_state)
    # if tuple, interpret as randn
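
The forked script gathers numerical evidence of convexity by testing Jensen's midpoint inequality on random pairs of points. A minimal self-contained sketch of that idea (the function and parameter names below are illustrative, not the gist's):

import numpy as np

def seems_convex(f, dim, n_pairs=10000, low=-10.0, high=10.0, seed=0):
    """Return False if some random pair violates f((x+y)/2) <= (f(x)+f(y))/2."""
    rng = np.random.RandomState(seed)
    for _ in range(n_pairs):
        x = rng.uniform(low, high, dim)
        y = rng.uniform(low, high, dim)
        if f((x + y) / 2) > (f(x) + f(y)) / 2 + 1e-9:
            return False  # found a counterexample to midpoint convexity
    return True  # numerical evidence only, not a proof

print(seems_convex(lambda v: np.dot(v, v), dim=3))     # True: ||v||^2 is convex
print(seems_convex(lambda v: np.sin(v).sum(), dim=3))  # False: sin is not convex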
Kulbear / focalloss.py
import torch
import torch.nn as nn

class FocalLoss(nn.Module):
    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        target = target.float()
        max_val = (-logit).clamp(min=0)
        # numerically stable binary cross-entropy with logits
        loss = logit - logit * target + max_val + \
            ((-max_val).exp() + (-logit - max_val).exp()).log()
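
For reference, a hedged sketch of how a binary focal loss of this shape is commonly completed: the stable BCE-with-logits term above gets scaled by a (1 - p_t)**gamma factor and reduced by the mean. The class name and the use of torch.nn.functional below are assumptions, not the gist's code:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BinaryFocalLoss(nn.Module):
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        target = target.float()
        # same stable BCE-with-logits quantity the snippet above builds by hand
        bce = F.binary_cross_entropy_with_logits(logit, target, reduction='none')
        # focal modulation: weight each term by (1 - p_t) ** gamma
        log_one_minus_pt = F.logsigmoid(-logit * (target * 2.0 - 1.0))
        return ((log_one_minus_pt * self.gamma).exp() * bce).mean()

criterion = BinaryFocalLoss(gamma=2.0)
print(criterion(torch.randn(8), torch.randint(0, 2, (8,))))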
Kulbear / model.py
import torch.nn as nn
from pretrainedmodels import se_resnext50_32x4d  # assumes Cadene's `pretrainedmodels` package

def get_senet50(config):
    model = se_resnext50_32x4d(pretrained='imagenet')
    # print(model.last_linear)
    # print(model.dropout)
    # swap the stem conv so the network accepts `config.channels` input channels
    model.layer0.conv1 = nn.Conv2d(config.channels, 64, 3, stride=2, padding=1, bias=False)
    model.avg_pool = nn.AdaptiveAvgPool2d(1)
    model.dropout = None
    model.last_linear = nn.Sequential(
        nn.BatchNorm1d(2048),
        nn.Dropout(0.5),
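
The nn.Sequential head is truncated in the preview. A hedged sketch of a plausible completion, where the final Linear layer, the num_classes value, and the 2048-dim SE-ResNeXt50 feature width are assumptions:

import torch
import torch.nn as nn

def make_head(num_classes, in_features=2048, p_drop=0.5):
    # BatchNorm -> Dropout -> Linear classifier over pooled features
    return nn.Sequential(
        nn.BatchNorm1d(in_features),
        nn.Dropout(p_drop),
        nn.Linear(in_features, num_classes),
    )

head = make_head(num_classes=28)
print(head(torch.randn(4, 2048)).shape)  # torch.Size([4, 28])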
Kulbear / gist:f58478d412e3f37b88dcd0521eb3abb8
import numpy as np

def multi_weighted_logloss(y_ohe, y_p):
    """
    @author olivier https://www.kaggle.com/ogrellier
    multi logloss for PLAsTiCC challenge
    """
    classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
    class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2,
                    65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
    # Normalize rows and limit y_preds to 1e-15, 1 - 1e-15
    y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
    # Transform to log
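
A hedged sketch of how such a class-weighted multi-class log loss is typically finished; the continuation below is an assumption, not the gist's exact code, and it assumes the columns of y_ohe and y_p follow the sorted class order and that every class appears at least once:

import numpy as np

def weighted_logloss_sketch(y_ohe, y_p, class_weight):
    # y_ohe: (n_samples, n_classes) one-hot truth, y_p: predicted probabilities
    y_p = np.clip(y_p, 1e-15, 1 - 1e-15)
    y_p /= y_p.sum(axis=1, keepdims=True)              # normalize rows
    log_p_true = np.sum(y_ohe * np.log(y_p), axis=0)   # per-class sum of log p on true members
    n_pos = y_ohe.sum(axis=0)                          # positives per class
    w = np.array([class_weight[k] for k in sorted(class_weight)], dtype=float)
    return -np.sum(w * log_p_true / n_pos) / np.sum(w)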
Kulbear / linkedlist.py
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

class MyLinkedList:
    def __init__(self):
        """
        Initialize your data structure here.
        """
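
The preview stops inside __init__. A compact sketch of how a singly linked list like this is usually fleshed out; the method set (get, add_at_head) follows the common "design a linked list" exercise and is an assumption, not the gist's code:

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

class LinkedListSketch:
    def __init__(self):
        self.head = None
        self.size = 0

    def add_at_head(self, val):
        node = ListNode(val)
        node.next = self.head
        self.head = node
        self.size += 1

    def get(self, index):
        # return the value at `index`, or -1 if out of range
        if index < 0 or index >= self.size:
            return -1
        cur = self.head
        for _ in range(index):
            cur = cur.next
        return cur.val

lst = LinkedListSketch()
lst.add_at_head(3)
lst.add_at_head(7)
print(lst.get(0), lst.get(1))  # 7 3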
Kulbear / autoencoder.py
Created Feb 18, 2018 — forked from gabrieleangeletti/autoencoder.py
Denoising Autoencoder implementation using TensorFlow.
import tensorflow as tf
import numpy as np
import os
import zconfig
import utils

class DenoisingAutoencoder(object):
    """ Implementation of Denoising Autoencoders using TensorFlow. """
Kulbear / run.py
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import numpy as np
import timeit
import tensorflow as tf
from pprint import pformat
mnist = read_data_sets("data", one_hot=False)
NUM_CLASS = 10
STEP = 200
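
read_data_sets from tensorflow.contrib was removed in TF 2.x. A hedged sketch of an equivalent MNIST load with integer labels (the one_hot=False case) using tf.keras.datasets:

import numpy as np
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

NUM_CLASS = 10
STEP = 200
print(x_train.shape, y_train.shape)  # (60000, 784) (60000,)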
Kulbear / histEq.m
% Author: Ji Yang
%% Description
% A simple implementation of histogram equalization.
%
%% Note
% Histogram equalization is a technique for adjusting image intensities
% to enhance contrast.
%
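
The MATLAB gist itself is not shown beyond its header comment. The same technique in a short numpy sketch: map each intensity through the normalized cumulative histogram so the output intensities spread over the full range (the test image and clipping are illustrative choices):

import numpy as np

def hist_eq(img):
    """Histogram-equalize an 8-bit grayscale image given as a uint8 array."""
    hist = np.bincount(img.ravel(), minlength=256)
    cdf = hist.cumsum()
    cdf_min = cdf[cdf > 0][0]
    # classic mapping: stretch the cumulative distribution onto [0, 255]
    lut = np.clip(np.round((cdf - cdf_min) / float(img.size - cdf_min) * 255), 0, 255).astype(np.uint8)
    return lut[img]

img = (np.random.rand(64, 64) ** 2 * 255).astype(np.uint8)  # low-contrast test image
print(hist_eq(img).min(), hist_eq(img).max())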
Kulbear / ruozhi.py
# Uses python3
import sys
from collections import namedtuple

Segment = namedtuple('Segment', 'start end')

def optimal_points(segments):
    points = []
    segments_by_end = sorted(segments, key=lambda x: x.end)
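
The function is cut off after the sort. A hedged sketch of the standard greedy completion for covering all segments with the fewest points: walk the segments in order of right endpoint and emit an endpoint only when the current segment is not yet covered (names below are illustrative):

from collections import namedtuple

Segment = namedtuple('Segment', 'start end')

def optimal_points_sketch(segments):
    points = []
    for seg in sorted(segments, key=lambda s: s.end):
        # the last chosen point covers every segment whose start is <= that point
        if not points or points[-1] < seg.start:
            points.append(seg.end)
    return points

segs = [Segment(1, 3), Segment(2, 5), Segment(3, 6)]
print(optimal_points_sketch(segs))  # [3] covers all three segments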
Kulbear / resume.py
Created Jul 1, 2017 — forked from dongweiming/resume.py
A résumé written in Python
#!/usr/bin/env python
# coding=utf-8
import random
import re

def color(messages):
    # wrap the text in a bold ANSI escape with a random foreground color (30-37)
    color = '\x1B[%d;%dm' % (1, random.randint(30, 37))
    return '%s %s\x1B[0m' % (color, messages)
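
A quick usage sketch of the ANSI color helper; the wrapper name below is illustrative, and the colored output only renders in a terminal that understands ANSI escapes:

import random

def color_sketch(messages):
    # bold text in a random ANSI foreground color (30-37)
    prefix = '\x1B[%d;%dm' % (1, random.randint(30, 37))
    return '%s %s\x1B[0m' % (prefix, messages)

print(color_sketch('Python resume'))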