Skip to content

Instantly share code, notes, and snippets.

def sample_gumbel(shape, eps=1e-20):
    """Draw Gumbel(0, 1) noise of the given shape.

    Uses the inverse-CDF trick -log(-log(U)) for U ~ Uniform(0, 1);
    `eps` keeps both logarithms away from log(0).
    """
    uniform = tf.random_uniform(shape, minval=0, maxval=1)
    inner = -tf.log(uniform + eps)
    return -tf.log(inner + eps)
def gumbel_softmax_sample(logits, temperature):
    """Sample from the Gumbel-Softmax (Concrete) distribution.

    Perturbs `logits` with Gumbel(0, 1) noise, then applies a
    temperature-scaled softmax over the perturbed logits.
    """
    noisy_logits = logits + sample_gumbel(tf.shape(logits))
    scaled = noisy_logits / temperature
    return tf.nn.softmax(scaled)
@yzh119
yzh119 / st-gumbel.py
Created January 12, 2018 12:25
ST-Gumbel-Softmax-Pytorch
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def sample_gumbel(shape, eps=1e-20, device=None):
    """Draw Gumbel(0, 1) noise of the given shape.

    Inverse-CDF sampling: -log(-log(U)) with U ~ Uniform(0, 1);
    `eps` guards both logarithms against log(0).

    Args:
        shape: output tensor shape (tuple, list, or torch.Size).
        eps: small numerical-stability constant.
        device: target device for the sample. Defaults to the current
            default device, replacing the original hard-coded `.cuda()`
            call that crashed on CPU-only machines; pass "cuda" to keep
            the old placement.

    Returns:
        Tensor of Gumbel(0, 1) samples with the requested shape.
    """
    # torch.autograd.Variable is a deprecated no-op wrapper in modern
    # PyTorch; plain tensors carry autograd state, so it is dropped.
    u = torch.rand(shape, device=device)
    return -torch.log(-torch.log(u + eps) + eps)
@yzh119
yzh119 / latency.txt
Created September 18, 2018 09:42 — forked from jboner/latency.txt
Latency Numbers Every Programmer Should Know
Latency Comparison Numbers (~2012)
----------------------------------
L1 cache reference 0.5 ns
Branch mispredict 5 ns
L2 cache reference 7 ns 14x L1 cache
Mutex lock/unlock 25 ns
Main memory reference 100 ns 20x L2 cache, 200x L1 cache
Compress 1K bytes with Zippy 3,000 ns 3 us
Send 1K bytes over 1 Gbps network 10,000 ns 10 us
Read 4K randomly from SSD* 150,000 ns 150 us ~1GB/sec SSD
@yzh119
yzh119 / convert.py
Created November 12, 2018 04:29
Convert RST file to sphinx-gallery `.py` format
"""
Usage:
python convert.py xxx.rst xxx.py
To convert markdown to sphinx_gallery `.py`, use pandoc to generate a `.rst` text in advance:
pandoc xxx.md --output xxx.rst
python convert.py xxx.rst xxx.py
@yzh119
yzh119 / draw.py
Created December 3, 2018 17:11 — forked from VoVAllen/draw.py
Draw att
# This part for jupyter notebook setting (if you wants to save, don't use this)
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
# import numpy as np
# import matplotlib.pyplot as plt
# plt.rcParams["animation.html"] = "jshtml"
import networkx as nx
from networkx.algorithms import bipartite
@yzh119
yzh119 / sgc-dgl.py
Last active February 20, 2019 12:54
DGL implementation of Simplified Graph Convolution
"""
This code was modified from the GCN implementation in DGL examples.
Simplifying Graph Convolutional Networks
Paper: https://arxiv.org/abs/1902.07153
Code: https://github.com/Tiiiger/SGC
SGC implementation in DGL.
"""
import argparse, time, math
@yzh119
yzh119 / dgl_sage_fp16.py
Last active October 12, 2020 07:03
Training GraphSAGE w/ fp16 in DGL.
"""Training graphsage w/ fp16.
Usage:
python train_full.py --gpu 0 --fp16 --dataset
Note that GradScaler is not activated because the model successfully converges
without gradient scaling.
DGL's Message Passing APIs are not compatible with fp16 yet, hence we disabled
@yzh119
yzh119 / dgl-transformer.py
Created December 3, 2020 09:03
Efficient Sparse Transformer implementation with DGL's builtin operators
import dgl
import dgl.ops as ops
import numpy as np
import torch as th
import torch.nn as nn
class FFN(nn.Module):
# Position-wise feed-forward block for the sparse Transformer.
# NOTE(review): only the start of __init__ is visible here (the gist
# preview is truncated); d_ffn is the hidden width of the first linear
# layer, and `dropout` presumably feeds an nn.Dropout defined past this
# excerpt — confirm against the full source.
def __init__(self, d_feat, d_ffn, dropout=0.1):
super().__init__()
self.linear_0 = nn.Linear(d_feat, d_ffn)
@yzh119
yzh119 / quine.c
Created April 2, 2021 05:47
My solution to quine program in C
/* C quine: `str` holds a printf template for the entire program text.
 * printf(str, 10, 34, str, 34, 10, 10, 10) substitutes newline (ASCII 10),
 * double-quote (ASCII 34), and `str` itself back into the template, so the
 * program prints its own source.
 * NOTE(review): these comments are not part of the printed output, so this
 * commented file no longer byte-matches what it prints; the output is the
 * original comment-free quine. */
#include <stdio.h>
char str[] = "#include <stdio.h>%cchar str[] = %c%s%c;%cint main() {%c printf(str, 10, 34, str, 34, 10, 10, 10);%c}";
int main() {
printf(str, 10, 34, str, 34, 10, 10, 10);
}
@yzh119
yzh119 / quine.py
Created April 2, 2021 05:50
My solution to quine program in Python
# Python quine: `prog` is a str.format template for the whole program.
# format(34, prog, 34, 10) re-inserts a double quote (chr 34), the template
# itself, another quote, and a newline (chr 10), so print() emits the
# program's own source.
# NOTE(review): these comments are not part of the printed output, so this
# commented file no longer byte-matches what it prints.
prog = "prog = {:c}{}{:c}{:c}print(prog.format(34, prog, 34, 10))"
print(prog.format(34, prog, 34, 10))