
import tensorflow as tf

def sample_gumbel(shape, eps=1e-20):
  """Sample from Gumbel(0, 1)."""
  U = tf.random_uniform(shape, minval=0, maxval=1)
  return -tf.log(-tf.log(U + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
  """Draw a sample from the Gumbel-Softmax distribution."""
  y = logits + sample_gumbel(tf.shape(logits))
  return tf.nn.softmax(y / temperature)
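This soft sampler is usually paired with a straight-through "hard" variant: forward-pass a one-hot argmax, backward-pass the soft gradient. A sketch along those lines (the hard flag and y_hard name are illustrative):

def gumbel_softmax(logits, temperature, hard=False):
  """Sample from Gumbel-Softmax; optionally discretize with straight-through gradients."""
  y = gumbel_softmax_sample(logits, temperature)
  if hard:
    # One-hot of the argmax on the forward pass; softmax gradient on the backward pass.
    y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
    y = tf.stop_gradient(y_hard - y) + y
  return y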
@ericjang
ericjang / PairedVideoFramesDataLoader
Last active June 28, 2021 00:25
Load the image at time t and the image at t+3; an alternative to just using two augmented views of the same image.
"""
To generate dataset, make folder with each video's frames extracted into subdir. the dino codebase has code to do this easily.
videos = glob.glob(os.path.join(SOURCE_DIR,"*.mp4"))[:num_videos_to_extract]
for i, video in enumerate(videos):
print(i)
directory=os.path.join(OUTPUT_DIR, str(i))
if not os.path.exists(directory):
os.makedirs(directory)
_extract_frames_from_video(inp=video, out=directory)
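Once the frames are on disk, pairing frame t with frame t+3 can be as simple as indexing a sorted file list per video. A minimal sketch (the class name, jpg naming scheme, and PIL usage are assumptions, not the gist's actual loader):

import glob
import os
from PIL import Image

class PairedVideoFrames:
  """Yields (frame_t, frame_{t+offset}) pairs from one video's frame directory."""
  def __init__(self, video_dir, offset=3):
    self.paths = sorted(glob.glob(os.path.join(video_dir, "*.jpg")))
    self.offset = offset
  def __len__(self):
    return max(0, len(self.paths) - self.offset)
  def __getitem__(self, t):
    return Image.open(self.paths[t]), Image.open(self.paths[t + self.offset])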
#
# Qt qmake integration with Google Protocol Buffers compiler protoc
#
# To compile protocol buffers with qt qmake, specify PROTOS variable and
# include this file
#
# Example:
# LIBS += /usr/lib/libprotobuf.so
# PROTOS = a.proto b.proto
# include(protobuf.pri)
@ericjang
ericjang / TensorFlow_Windows.md
Last active March 27, 2021 22:19
Setting up TensorFlow on Windows using Docker.

TensorFlow development environment on Windows using Docker

Here are instructions for setting up a TensorFlow dev environment with Docker on Windows, configured so that you can access Jupyter Notebook from within the VM and edit files in your text editor of choice on the Windows side.

Installation

First, install Docker Toolbox: https://www.docker.com/docker-toolbox

Since this is Windows, there is no need to create the "docker" Unix group.
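Once Docker Toolbox is installed, the end state looks roughly like the following (the image tag and the default Docker Toolbox VM IP 192.168.99.100 are typical values and may differ on your machine):

# Start a TensorFlow container and forward the Jupyter port to the VM:
docker run -it -p 8888:8888 tensorflow/tensorflow
# Then browse to http://192.168.99.100:8888 from the Windows host.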

"""
A lightweight experiment logbook for Jupyter/Colab-style ad hoc experiments.
Let's say you generate a plot with Matplotlib and want to re-run your notebook with a
different set of configurations and then compare the resulting plot to the one you saved (to see
if the new configuration is better).
# Saving experiments
f = plt.gcf()
elog.savefig(f,
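The preview cuts off above; a hypothetical sketch of what an elog.savefig helper could do (the timestamped-filename scheme and log_dir parameter are assumptions, not the gist's actual API):

import os
import time

def savefig(fig, name="experiment", log_dir="elog"):
  """Save a Matplotlib figure under a timestamped filename for later comparison."""
  os.makedirs(log_dir, exist_ok=True)
  path = os.path.join(log_dir, "%s_%d.png" % (name, int(time.time())))
  fig.savefig(path)
  return path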
@ericjang
ericjang / tf_upsample.py
Created July 31, 2016 22:22
alternating convolution & upsampling in TensorFlow
#!/usr/bin/env python
# uses tf-slim from release 0.10.0
import tensorflow as tf
slim = tf.contrib.slim
batch = 13
in_height, in_width, in_channels = 7, 7, 512
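The preview stops at the input shape; a minimal sketch of one alternating conv + upsample stage under those dimensions (the filter counts and target sizes here are illustrative, not the gist's values):

x = tf.placeholder(tf.float32, [batch, in_height, in_width, in_channels])
net = slim.conv2d(x, 256, [3, 3])                      # 13x7x7x512 -> 13x7x7x256
net = tf.image.resize_nearest_neighbor(net, [14, 14])  # 7x7 -> 14x14 spatially
net = slim.conv2d(net, 128, [3, 3])                    # 13x14x14x256 -> 13x14x14x128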
@ericjang
ericjang / log_variance.py
Created March 2, 2018 23:43
numerically stable version of log-variance, in TF
def _reduce_logmeanexp(x, axis, epsilon):
  """Numerically stable implementation of log-mean-exp.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor)]`.
    epsilon: Floating point scalar to avoid log-underflow.
  """
  # The gist preview truncates here; a plausible body (an assumption, not the
  # original) subtracts the max before exponentiating, then adds it back:
  x_max = tf.reduce_max(x, axis=axis, keepdims=True)
  lme = tf.log(tf.reduce_mean(tf.exp(x - x_max), axis=axis, keepdims=True) + epsilon) + x_max
  return tf.squeeze(lme, axis=axis)
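Given log-mean-exp, one way a log-variance could be assembled (a hypothetical helper, not the gist's code) uses Var(w) = E[w^2] - (E[w])^2 entirely in log space:

def _log_variance(log_w, axis=None, epsilon=1e-8):
  """log Var(w) for w = exp(log_w), computed without leaving log space."""
  log_mean_sq = _reduce_logmeanexp(2.0 * log_w, axis, epsilon)   # log E[w^2]
  log_sq_mean = 2.0 * _reduce_logmeanexp(log_w, axis, epsilon)   # log (E[w])^2
  # log(a - b) = log a + log1p(-exp(log b - log a)); Jensen guarantees a >= b.
  return log_mean_sq + tf.log1p(-tf.exp(log_sq_mean - log_mean_sq) + epsilon)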
bijectors = []
for i in range(num_bijectors):
  bijectors.append(tfb.MaskedAutoregressiveFlow(
      shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
          hidden_layers=[512, 512])))
  bijectors.append(tfb.Permute(permutation=[1, 0]))
# Discard the trailing Permute when chaining the flow.
flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])))
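The chained bijector is then typically wrapped around a base distribution to get the dist used in the training loop below; a sketch assuming a 2-D standard-normal base and tfd as the distributions module:

base_dist = tfd.MultivariateNormalDiag(loc=tf.zeros([2]))
dist = tfd.TransformedDistribution(distribution=base_dist, bijector=flow_bijector)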
class BatchNorm(tfb.Bijector):
  def __init__(self, eps=1e-5, decay=0.95, validate_args=False, name="batch_norm"):
    super(BatchNorm, self).__init__(
        event_ndims=1, validate_args=validate_args, name=name)
    self._vars_created = False
    self.eps = eps
    self.decay = decay

  def _create_vars(self, x):
    n = x.get_shape().as_list()[1]
loss = -tf.reduce_mean(dist.log_prob(x_samples))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
NUM_STEPS = int(1e5)
global_step = []
np_losses = []
for i in range(NUM_STEPS):
  _, np_loss = sess.run([train_op, loss])
  if i % 1000 == 0:
    # The preview truncates here; this completion just appends to the
    # two lists declared above, recording the loss every 1000 steps.
    global_step.append(i)
    np_losses.append(np_loss)
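A short usage note: the two lists feed straight into a Matplotlib training curve, e.g.:

import matplotlib.pyplot as plt

plt.plot(global_step, np_losses)
plt.xlabel("step")
plt.ylabel("negative log-likelihood")
plt.show()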