WANG Chenxi (samson-wang) · OceanDark
@samson-wang
samson-wang / gpu_mem.json
Created June 29, 2017 03:27
GPU metrics for Telegraf and Chronograf
{
  "id": "20850d4a-310a-4ab7-b33d-d33528b832af",
  "measurement": "gpu_mycollector",
  "app": "gpu_mem",
  "cells": [{
    "x": 0,
    "y": 0,
    "w": 4,
    "h": 4,
    "i": "da71e439-25ac-4bf7-9276-697ff69e1604",
@samson-wang
samson-wang / fxxk_alimei.py
Last active October 24, 2017 10:28
Alimei 1024 (阿里妹1024)
M_C = ['.-', '-...', '-.-.', '-..', '.', '..-.', '--.', '....', '..', '.---', '-.-', '.-..', '--', '-.', '---', '.--.', '--.-', '.-.', '...', '-', '..-', '...-', '.--', '-..-', '-.--', '--..']
A_C = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
M_C += ['.----', '..---', '...--', '....-', '.....', '-....', '--...', '---..', '----.', '-----']
A_C += ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']  # '-----' decodes to 0, not 10
BASE_DICT = dict(zip(M_C, A_C))
TASH = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
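# Hypothetical usage sketch (not part of the original gist): decode a
# space-separated Morse string with the BASE_DICT built above.
def decode_morse(msg):
    # Unknown codes are kept verbatim so a bad token is easy to spot.
    return ''.join(BASE_DICT.get(code, code) for code in msg.split())

print(decode_morse('.... . .-.. .-.. ---'))  # HELLO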
@samson-wang
samson-wang / evalCOCO.m
Created February 11, 2018 09:28
A work-around for testing Realtime_Multi-Person_Pose_Estimation on the COCO dataset
addpath('util/jsonlab/');
addpath('src');
addpath('util');
addpath('util/ojwoodford-export_fig-5735e6d/');
%addpath('/data/Repo/Realtime_Multi-Person_Pose_Estimation/training/dataset/COCO/coco/MatlabAPI');
fid = fopen('../val2014_flist.2k.csv');
data=textscan(fid,'%f %s','delimiter',',');
fclose(fid);
display(data);
for i = 1:length(data{1})
@samson-wang
samson-wang / video_cap.py
Last active March 23, 2018 11:30
Capture from one or more video devices
import cv2
import sys
import numpy as np
import imutils
# Video sources: default device 0, or comma-separated device indices from argv.
vids = [0] if len(sys.argv) < 2 else [int(v) for v in sys.argv[1].split(',')]
caps = [cv2.VideoCapture(v) for v in vids]
scale = .5
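# A rough sketch of how the loop might continue (assumed, not from the
# original gist): grab a frame from each capture, scale it, and display it.
while True:
    frames = [cap.read()[1] for cap in caps]
    for i, frame in enumerate(frames):
        if frame is None:
            continue
        frame = imutils.resize(frame, width=int(frame.shape[1] * scale))
        cv2.imshow('cam-%d' % i, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
for cap in caps:
    cap.release()
cv2.destroyAllWindows()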
@samson-wang
samson-wang / rfs.md
Last active March 3, 2024 12:15
ResNet Receptive Field Size
| layer   | resnet18 | resnet34 | resnet50 | resnet101 |
|---------|----------|----------|----------|-----------|
| conv1   | 7        | 7        | 7        | 7         |
| maxpool | 11       | 11       | 11       | 11        |
| layer1  | 43       | 59       | 35       | 35        |
| layer2  | 99       | 179      | 91       | 91        |
| layer3  | 211      | 547      | 267      | 811       |
| layer4  | 435      | 899      | 427      | 971       |
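These sizes follow the usual receptive-field recurrence: each convolution or pooling layer grows the field by (kernel - 1) times the product of all preceding strides. A minimal sketch of that recurrence (mine, for illustration; only the ResNet stem is listed, not the full per-architecture layer tables behind the numbers above):

def receptive_field(layers):
    # layers: list of (kernel_size, stride) pairs in forward order.
    rf, jump = 1, 1
    for k, s in layers:
        rf += (k - 1) * jump  # growth is scaled by the accumulated stride
        jump *= s
    return rf

# ResNet stem: 7x7 stride-2 conv, then 3x3 stride-2 max-pool,
# matching the conv1 (7) and maxpool (11) rows above.
print(receptive_field([(7, 2)]))          # 7
print(receptive_field([(7, 2), (3, 2)]))  # 11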
import torch
from torch.nn import Conv2d, Sequential
import time
from torchvision import models
def score(arch, batch_size=32, num_batches=10):
    # Build the requested torchvision model and a random input batch on the GPU.
    model = models.__dict__[arch]().cuda()
    data = torch.rand((batch_size, 3, 224, 224)).cuda()
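    # A plausible continuation (assumed; the gist preview is truncated here):
    # time num_batches forward passes and report images per second.
    model.eval()
    with torch.no_grad():
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(num_batches):
            model(data)
        torch.cuda.synchronize()
        elapsed = time.time() - start
    return batch_size * num_batches / elapsed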
@samson-wang
samson-wang / result.log
Last active February 19, 2019 09:56
float multiplication test.
Minimum float positive value: 1.175494E-38
A: 0.403069, HEX: 0x3ece5f18
A: 4.0306925774e-01, HEX: 0x3ece5f18
A: 3.7674255964e-06, HEX: 0x367cd3e1
A: 3.8132049561e+01, HEX: 0x42188738
real 0m0.057s
user 0m0.056s
sys 0m0.000s
Minimum float positive value: 1.175494E-38
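The HEX column in the log is the raw IEEE-754 bit pattern of each single-precision value. A small sketch (not from the gist) that reproduces that formatting in Python:

import struct

def show(a):
    # Reinterpret the float32 bits as an unsigned 32-bit integer and print both.
    bits = struct.unpack('<I', struct.pack('<f', a))[0]
    print('A: %.10e, HEX: 0x%08x' % (a, bits))

show(0.403069)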
@samson-wang
samson-wang / test_mp.py
Created January 31, 2020 12:01
multi-processing
import multiprocessing as mp
import time
import random
todo = mp.Value('i', 0)
done = mp.Value('i', 0)
idx = mp.Value('i', 0)
finish = mp.Value('i', 0)
def worker(todo, done, idx, finish):
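    # A guessed continuation (not from the original gist): claim an item id
    # from the shared idx counter, "work" on it, then count it as done.
    while not finish.value:
        with idx.get_lock():
            item = idx.value
            idx.value += 1
        time.sleep(random.random() * 0.01)  # simulate work on `item`
        with done.get_lock():
            done.value += 1

if __name__ == '__main__':
    ps = [mp.Process(target=worker, args=(todo, done, idx, finish)) for _ in range(4)]
    for p in ps:
        p.start()
    time.sleep(1)
    finish.value = 1
    for p in ps:
        p.join()
    print('claimed %d, done %d' % (idx.value, done.value))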
@samson-wang
samson-wang / focal_loss.py
Last active April 2, 2024 11:56
A really simple PyTorch implementation of focal loss for both sigmoid and softmax predictions.
import torch
from torch.nn.functional import log_softmax
def sigmoid_focal_loss(logits, target, gamma=2., alpha=0.25):
    num_classes = logits.shape[1]
    dtype = target.dtype
    device = target.device
    class_range = torch.arange(0, num_classes, dtype=dtype, device=device).unsqueeze(0)
    t = target.unsqueeze(1)
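    # A plausible continuation (assumed; the gist preview is truncated here),
    # following the usual per-class sigmoid focal-loss formulation:
    p = torch.sigmoid(logits)
    term1 = (1 - p) ** gamma * torch.log(p)
    term2 = p ** gamma * torch.log(1 - p)
    pos = (t == class_range).float()               # one-hot positive mask
    neg = ((t != class_range) * (t >= 0)).float()  # negatives, ignoring labels < 0
    return -pos * term1 * alpha - neg * term2 * (1 - alpha)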
import torch
import torch.nn as nn
import time
class Net(nn.Module):
    def __init__(self, ch=3):
        super(Net, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(ch, 8, 3, padding=1),
            nn.BatchNorm2d(8),