Skip to content

Instantly share code, notes, and snippets.

View ethanyanjiali's full-sized avatar

Ethan Yanjia Li ethanyanjiali

View GitHub Profile
@ethanyanjiali
ethanyanjiali / loss.py
Last active March 14, 2020 21:15
HG Loss
# vanilla version: plain mean-squared error between target and predicted maps
# (presumably heatmaps from a stacked-hourglass network — confirm with caller)
loss += tf.math.reduce_mean(tf.math.square(labels - output))
# improved version: weighted MSE. Pixels where labels > 0 (the sparse
# positive/keypoint region) get weight 81 + 1 = 82, background pixels weight 1,
# so the few positive pixels are not drowned out by the background.
# NOTE(review): both versions accumulate into `loss` here only for
# illustration — use one or the other, not both.
weights = tf.cast(labels > 0, dtype=tf.float32) * 81 + 1
loss += tf.math.reduce_mean(tf.math.square(labels - output) * weights)
@ethanyanjiali
ethanyanjiali / gaussian.py
Created March 14, 2020 21:06
Generate Gaussian Patch
# Peak value of the gaussian (the center pixel ends up equal to `scale`).
scale = 1
# Patch covers +/- 3*sigma around the center, hence an odd side length.
size = 6 * sigma + 1
# Integer coordinate grids over the patch; reuse `size` so the grid extent
# always matches the computed side length.
x, y = tf.meshgrid(tf.range(0, size, 1), tf.range(0, size, 1), indexing='xy')
# The gaussian peaks at the patch center.
center_x = size // 2
center_y = size // 2
# Evaluate exp(-(dx^2 + dy^2) / (2*sigma^2)) over the (size x size) grid
# (e.g. 7x7 when sigma == 1) and cast to float32 for downstream use.
squared_dist = tf.math.square(x - center_x) + tf.math.square(y - center_y)
gaussian_patch = tf.cast(
    tf.math.exp(-squared_dist / (2 * tf.math.square(sigma))) * scale,
    dtype=tf.float32,
)
@ethanyanjiali
ethanyanjiali / process_scale.py
Created March 14, 2020 20:31
Process MPII Scale
# Drop invisible keypoints, whose coordinate values are <= 0
# (MPII marks absent/occluded joints with non-positive coordinates).
masked_keypoint_x = tf.boolean_mask(keypoint_x, keypoint_x > 0)
masked_keypoint_y = tf.boolean_mask(keypoint_y, keypoint_y > 0)
# Find the left-most, right-most, top, and bottom keypoints — i.e. the
# tight bounding box of the visible keypoints, later used to derive scale.
# NOTE(review): tf.reduce_min/max on an empty tensor (no visible keypoints)
# yields +/-inf — confirm callers guard against a fully-occluded person.
keypoint_xmin = tf.reduce_min(masked_keypoint_x)
keypoint_xmax = tf.reduce_max(masked_keypoint_x)
keypoint_ymin = tf.reduce_min(masked_keypoint_y)
keypoint_ymax = tf.reduce_max(masked_keypoint_y)
@ethanyanjiali
ethanyanjiali / joint.json
Created March 14, 2020 20:22
One annotation for MPII in JSON
{
"joints_vis": [
1,
1,
1,
1,
1,
1,
1,
1,
@ethanyanjiali
ethanyanjiali / hg.py
Created March 14, 2020 19:33
Single Hourglass Module
def HourglassModule(inputs, order, filters, num_residual):
"""
One Hourglass Module. Usually we stacked multiple of them together.
https://github.com/princeton-vl/pose-hg-train/blob/master/src/models/hg.lua#L3
inputs:
order: The remaining order for HG modules to call itself recursively.
num_residual: Number of residual layers for this HG module.
"""
# Upper branch
@ethanyanjiali
ethanyanjiali / install-horovod.md
Last active September 26, 2019 04:46
Install Horovod with Tensorflow 2.0 on Debian stretch
  • Install gcc/g++ 7+. Add this line to /etc/apt/sources.list:
deb http://ftp.de.debian.org/debian buster main 

And then install gcc/g++ 7

sudo apt-get install gcc-7 g++-7
sudo rm /usr/bin/gcc
sudo rm /usr/bin/g++
@ethanyanjiali
ethanyanjiali / vim-setup.md
Last active September 26, 2019 05:17
Vim setup
  • Install Vim and enable Python feature
sudo git clone https://github.com/vim/vim.git && cd vim
sudo ./configure --with-features=huge --enable-multibyte --enable-pythoninterp=yes --with-python-config-dir=/usr/lib/python2.7/config-x86_64-linux-gnu/ --enable-python3interp=yes --with-python3-config-dir=/usr/lib/python3.5/config-3.5m-x86_64-linux-gnu/ --enable-gui=gtk2 --enable-cscope --prefix=/usr/local/
  • Use this as ~/.vimrc
"vundle
set nocompatible
filetype off
def train_step(images_a, images_b, epoch, step):
    """Run one CycleGAN training step on a pair of image batches.

    First updates the generators, then refreshes the image pools with the
    newly generated fakes, and finally updates the discriminators using
    (possibly historical) fakes drawn from the pools.

    `epoch` and `step` are unused in the visible body — presumably kept for
    logging/checkpointing by the caller; TODO confirm.
    """
    # Generator pass: produce A->B and B->A fakes and update both generators.
    fake_a2b, fake_b2a, gen_loss_dict = train_generator(images_a, images_b)
    # Query the image pools: each returns either the new fake or a cached
    # older one, which stabilizes discriminator training.
    pooled_b2a = fake_pool_b2a.query(fake_b2a)
    pooled_a2b = fake_pool_a2b.query(fake_a2b)
    # Discriminator pass against reals and pooled fakes.
    dis_loss_dict = train_discriminator(images_a, images_b, pooled_a2b, pooled_b2a)
def train(dataset, epochs):
for epoch in range(checkpoint.epoch+1, epochs+1):
@ethanyanjiali
ethanyanjiali / cyclegan_discriminator_loss.py
Created June 6, 2019 06:02
cyclegan_discriminator_loss
@tf.function
def train_discriminator(images_a, images_b, fake_a2b, fake_b2a):
real_a = images_a
real_b = images_b
with tf.GradientTape() as tape:
# Discriminator A should classify real_a as A
loss_gan_dis_a_real = calc_gan_loss(discriminator_a(real_a, training=True), True)
# Discriminator A should classify generated fake_b2a as not A
loss_gan_dis_a_fake = calc_gan_loss(discriminator_a(fake_b2a, training=True), False)
@ethanyanjiali
ethanyanjiali / cyclegan_discriminator.py
Created June 6, 2019 06:01
cyclegan_discriminator
def make_discriminator_model():
# C64-C128-C256-C512
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (4, 4), strides=(2, 2), padding='same', input_shape=(256, 256, 3)))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.Conv2D(128, (4, 4), strides=(2, 2), padding='same', use_bias=False))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))