Eric Jang (ericjang)

elogger.py
"""
A lightweight experiment logbook for Jupyter/Colab-style ad hoc experiments.
Let's say you generate a plot with Matplotlib and want to re-run your notebook with a
different set of configurations and then compare the resulting plot to the one you saved (to see
if the new configuration is better).
# Saving experiments
f = plt.gcf()
elog.savefig(f,
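The preview cuts off above. Below is a minimal sketch of what an elog.savefig-style helper might do; the function name, arguments, and directory layout are assumptions for illustration, not elogger's actual API.

import os
import time
import matplotlib.pyplot as plt

def savefig(fig, name, log_dir='elog_runs'):
    # Hypothetical helper (not the original elogger API): stamp each saved
    # figure with the run time so plots from different configurations can be
    # compared side by side later.
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    path = os.path.join(log_dir, '%s_%d.png' % (name, int(time.time())))
    fig.savefig(path)
    return path

# Usage sketch: save the current figure under a run-stamped filename.
# savefig(plt.gcf(), 'loss_curve')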
log_variance.py: numerically stable version of log-variance, in TF
def _reduce_logmeanexp(x, axis, epsilon):
  """Numerically-stable (?) implementation of log-mean-exp.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor)]`.
    epsilon: Floating point scalar to avoid log-underflow.
  """
  # The body below is a sketch (the gist preview is truncated): shift by the
  # max so exp() cannot overflow, then undo the shift after the log.
  x_max = tf.reduce_max(x, axis=axis, keep_dims=True)
  return tf.log(tf.reduce_mean(tf.exp(x - x_max), axis=axis, keep_dims=True)
                + epsilon) + x_max
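A quick sanity check of the helper (with the sketched body above): on moderately scaled inputs it should agree with the naive computation.

import tensorflow as tf

x = tf.random_normal([4, 1000])
naive = tf.log(tf.reduce_mean(tf.exp(x), axis=1, keep_dims=True))
stable = _reduce_logmeanexp(x, axis=1, epsilon=1e-12)
with tf.Session() as sess:
  np_naive, np_stable = sess.run([naive, stable])
print(abs(np_naive - np_stable).max())  # tiny for inputs of moderate scale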
nf2_chain.py
for i in range(num_bijectors):
    bijectors.append(tfb.MaskedAutoregressiveFlow(
        shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
            hidden_layers=[512, 512])))
    # Permute the two dimensions between MAF layers so each dimension gets
    # conditioned on the other somewhere in the stack.
    bijectors.append(tfb.Permute(permutation=[1, 0]))
# Drop the trailing Permute and reverse the list, since tfb.Chain applies
# bijectors from last to first.
flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])))
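A sketch of how the chained bijector could be turned into a trainable flow, assuming the 2-D standard-normal base_dist from nf1_base_dist.py below; maf_dist is a name introduced here for illustration.

maf_dist = tfd.TransformedDistribution(
    distribution=base_dist,
    bijector=flow_bijector)
samples = maf_dist.sample(512)      # draw from the flow (forward pass)
log_p = maf_dist.log_prob(samples)  # density via inverse + log-det-Jacobian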
nf2_batchnorm.py
class BatchNorm(tfb.Bijector):
    def __init__(self, eps=1e-5, decay=0.95, validate_args=False, name="batch_norm"):
        super(BatchNorm, self).__init__(
            event_ndims=1, validate_args=validate_args, name=name)
        self._vars_created = False
        self.eps = eps
        self.decay = decay

    def _create_vars(self, x):
        # Lazily create the normalization variables once the event size is known.
        n = x.get_shape().as_list()[1]
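        # The preview cuts off here. The lines below are a sketch of the
        # variables a batch-norm bijector typically needs (the names are
        # assumptions, not necessarily those of the original gist): a
        # trainable shift/scale plus non-trainable running statistics.
        with tf.variable_scope(self.name):
            self.beta = tf.get_variable('beta', [1, n], dtype=tf.float32)
            self.gamma = tf.get_variable('gamma', [1, n], dtype=tf.float32)
            self.train_m = tf.get_variable(
                'moving_mean', [1, n], dtype=tf.float32, trainable=False)
            self.train_v = tf.get_variable(
                'moving_var', [1, n], dtype=tf.float32,
                initializer=tf.ones_initializer(), trainable=False)
        self._vars_created = True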
nf1_opt.py
# Maximize the log-likelihood of the target samples under the flow.
loss = -tf.reduce_mean(dist.log_prob(x_samples))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

NUM_STEPS = int(1e5)
global_step = []
np_losses = []
for i in range(NUM_STEPS):
    _, np_loss = sess.run([train_op, loss])
    if i % 1000 == 0:
        # Record the loss every 1000 steps (completing the truncated preview).
        global_step.append(i)
        np_losses.append(np_loss)
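After training, the recorded losses can be plotted with Matplotlib (imported in nf1_imports.py); a small sketch:

plt.plot(global_step, np_losses)
plt.xlabel('step')
plt.ylabel('negative log-likelihood')
plt.show()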
nf1_chain.py
d, r = 2, 2
DTYPE = tf.float32
bijectors = []
num_layers = 6
for i in range(num_layers):
    with tf.variable_scope('bijector_%d' % i):
        V = tf.get_variable('V', [d, r], dtype=DTYPE)  # factor loading
        shift = tf.get_variable('shift', [d], dtype=DTYPE)  # affine shift
        L = tf.get_variable('L', [d * (d + 1) // 2],
                            dtype=DTYPE)  # flattened lower-triangular entries
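        # The preview cuts off here. A hedged sketch of how these variables
        # might be assembled into one layer of the chain (the construction in
        # the full gist may differ): a low-rank (V) plus lower-triangular (L)
        # affine transform, followed by the LeakyReLU bijector defined in
        # nf1_prelu.py below. fill_triangular turns the flat vector L into a
        # lower-triangular d x d matrix.
        bijectors.append(tfb.Affine(
            shift=shift,
            scale_tril=tfd.fill_triangular(L),
            scale_perturb_factor=V))
        bijectors.append(LeakyReLU(alpha=0.5))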
nf1_prelu.py
# Quite easy to interpret: multiplying by alpha causes a contraction in volume.
class LeakyReLU(tfb.Bijector):
    def __init__(self, alpha=0.5, validate_args=False, name="leaky_relu"):
        super(LeakyReLU, self).__init__(
            event_ndims=1, validate_args=validate_args, name=name)
        self.alpha = alpha

    def _forward(self, x):
        return tf.where(tf.greater_equal(x, 0), x, self.alpha * x)
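    # The preview ends above. A sketch of the remaining bijector methods,
    # written for event_ndims=1 (these are assumptions, not necessarily the
    # original gist's exact code).
    def _inverse(self, y):
        return tf.where(tf.greater_equal(y, 0), y, y / self.alpha)

    def _inverse_log_det_jacobian(self, y):
        # |dx/dy| is 1 where y >= 0 and 1/alpha elsewhere; sum the log over
        # the event dimension.
        I = tf.ones_like(y)
        J_inv = tf.where(tf.greater_equal(y, 0), I, I / self.alpha)
        return tf.reduce_sum(tf.log(tf.abs(J_inv)), axis=-1)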
nf1_imports.py
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
tfd = tf.contrib.distributions
tfb = tfd.bijectors
nf1_base_dist.py
base_dist = tfd.MultivariateNormalDiag(loc=tf.zeros([2], tf.float32))  # 2-D standard normal
nf1_target.py
batch_size = 512
# Target distribution: x2 ~ Normal(0, scale=4) and x1 ~ Normal(0.25 * x2^2, 1),
# which gives a curved, banana-shaped joint density.
x2_dist = tfd.Normal(loc=0., scale=4.)
x2_samples = x2_dist.sample(batch_size)
x1 = tfd.Normal(loc=.25 * tf.square(x2_samples),
                scale=tf.ones(batch_size, dtype=tf.float32))
x1_samples = x1.sample()
x_samples = tf.stack([x1_samples, x2_samples], axis=1)
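A quick way to visualize the curved target these samples form (a sketch; uses a throwaway session rather than the InteractiveSession from nf1_opt.py):

with tf.Session() as sess:
    np_x = sess.run(x_samples)
plt.scatter(np_x[:, 0], np_x[:, 1], s=5)
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()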