ericjang / tf_upsample.py
Created July 31, 2016 22:22
alternating convolution & upsampling in TensorFlow
#!/usr/bin/env python
# uses tf-slim from release 0.10.0
import tensorflow as tf
slim = tf.contrib.slim
batch = 13
in_height, in_width, in_channels = 7, 7, 512
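# The gist is truncated above; what follows is a minimal sketch (not the
# original code) of the alternating pattern the title describes, assuming the
# tf-slim 0.10-era API (slim.conv2d) and nearest-neighbor upsampling. Channel
# widths and the placeholder below are illustrative.
x = tf.placeholder(tf.float32, [batch, in_height, in_width, in_channels])
net = x
for out_channels in [256, 128, 64]:                  # illustrative widths
    net = slim.conv2d(net, out_channels, [3, 3])     # 3x3 conv, stride 1
    h, w = net.get_shape().as_list()[1:3]
    net = tf.image.resize_nearest_neighbor(net, [2*h, 2*w])  # 2x upsample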
ericjang / leibniz.py
Last active August 16, 2016 17:45
Computes pi using the Leibniz formula
# Leibniz series: pi/4 = sum_{n=0}^inf (-1)^n / (2n+1)
r = 0.0
for n in range(1000000):
    r += (-1.0)**n / (2.0*n + 1.0)
print(4*r)
ericjang / dropTheBase.py
Created April 29, 2016 21:03
audio-driven keyframes in Maya
# Author: Eric Jang
# 2013-Jul-17
# The problem with using audioWave + time nodes + audioWave bonus tool in Maya
# to drive animation is that we can't really do spectral analysis (high pass/low pass filters)
# short of implementing it using hypershade nodes, so we can't get really fine-tuned animation
# ... but Python is good for this!
# caveat: this was coded up in one night so it may be unstable. Use with caution.
# future work: instead of just the simple amplitude, we can perform FFT/spectral analysis to extract individual frequency bands
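
The gist body itself is cut off above; below is a rough sketch (not the original code) of the amplitude-to-keyframe idea, assuming a mono 16-bit WAV, a 24 fps scene, and a hypothetical target object pCube1:

import wave, struct
import maya.cmds as cmds

FPS = 24.0
wav = wave.open('/path/to/audio.wav', 'rb')   # placeholder path
rate, nframes = wav.getframerate(), wav.getnframes()
samples = struct.unpack('<%dh' % nframes, wav.readframes(nframes))
wav.close()

samples_per_frame = int(rate / FPS)
for f in range(nframes // samples_per_frame):
    chunk = samples[f*samples_per_frame:(f+1)*samples_per_frame]
    amp = max(abs(s) for s in chunk) / 32768.0   # normalized peak amplitude
    # drive any keyable channel from the audio envelope (pCube1 is hypothetical)
    cmds.setKeyframe('pCube1', attribute='scaleY', time=f, value=1.0 + amp)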
ericjang / protobuf.pri
#
# Qt qmake integration with Google Protocol Buffers compiler protoc
#
# To compile protocol buffers with Qt qmake, specify the PROTOS variable and
# include this file
#
# Example:
# LIBS += /usr/lib/libprotobuf.so
# PROTOS = a.proto b.proto
# include(protobuf.pri)

# Brown CS Potions Textbook:

  • List available printers: lpstat -p -d
  • Remove something from the GNOME menu bar: Windows key + Alt + right-click -> option to remove
  • Screen Recording: Get window id: xwininfo -display :0, then use recordmydesktop
  • Find out who's logged in: who
  • GTX970 GPUs: cslab6a-6h, gemini, pastorale, pokemon
  • penny has a GTX 550 Ti GPU
  • Make an application full screen: F11
ericjang / KL.lua
Created March 19, 2016 17:46
KL Gaussian
-- torch implementation of KL divergence between two diagonal Gaussians
-- assumed from context (gist is truncated): p.sigma / q.sigma hold variance
-- vectors, iqv = elementwise inverse of q.sigma, diff = p.mu - q.mu, ndim = k
-- note: as written this returns -KL(p || q); the bracketed sum equals 2*KL
self.output = -1/2 * (
      torch.cmul(iqv, p.sigma):sum() -- trace term, tr(\Sigma_q^{-1} \Sigma_p)
    + torch.cmul(diff:clone():pow(2), iqv):sum() -- difference in means, (\mu_q-\mu_p)^T \Sigma_q^{-1} (\mu_q-\mu_p)
    - ndim -- k
    + torch.log(q.sigma):sum() - torch.log(p.sigma):sum() -- ratio of determinants, log |\Sigma_q| / |\Sigma_p|
)
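
For reference, a NumPy sketch of the same closed form, assuming diagonal Gaussians whose variances are stored as vectors (mirroring p.sigma / q.sigma above); this is an illustration, not part of the gist:

import numpy as np

def kl_diag_gaussians(mu_p, var_p, mu_q, var_q):
    """KL(p || q) for diagonal Gaussians; var_* are variance vectors."""
    iqv = 1.0 / var_q                        # Sigma_q^{-1}
    trace = np.sum(iqv * var_p)              # tr(Sigma_q^{-1} Sigma_p)
    quad = np.sum((mu_q - mu_p)**2 * iqv)    # (mu_q-mu_p)^T Sigma_q^{-1} (mu_q-mu_p)
    logdet = np.sum(np.log(var_q)) - np.sum(np.log(var_p))  # log |Sigma_q|/|Sigma_p|
    return 0.5 * (trace + quad - mu_p.size + logdet)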
def binary_crossentropy(t,o):
    # eps: small constant for numerical stability, assumed defined elsewhere
    return -(t*tf.log(o+eps) + (1.0-t)*tf.log(1.0-o+eps))
# reconstruction term appears to have been collapsed down to a single scalar value (rather than one per item in minibatch)
x_recons=tf.nn.sigmoid(cs[-1])
# after computing binary cross entropy, sum across features, then take the mean of those sums over the minibatch
Lx=tf.reduce_sum(binary_crossentropy(x,x_recons),1) # reconstruction term
Lx=tf.reduce_mean(Lx)
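# Side sketch (illustration only, not part of the model): the same reduction
# on concrete assumed shapes, batch 13 x 784 features, with NumPy:
import numpy as np
bce_np = np.random.rand(13, 784)     # stand-in per-pixel losses: batch x features
per_example = bce_np.sum(axis=1)     # sum across features -> shape (13,)
scalar_loss = per_example.mean()     # mean over the minibatch -> scalar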
def write_no_attn(h_dec):
    with tf.variable_scope("write",reuse=DO_SHARE):
        return linear(h_dec,img_size)
def write_attn(h_dec):
    with tf.variable_scope("writeW",reuse=DO_SHARE):
        w=linear(h_dec,write_size) # batch x (write_n*write_n)
    N=write_n
    w=tf.reshape(w,[batch_size,N,N])
    Fx,Fy,gamma=attn_window("write",h_dec,write_n)
    # reconstructed tail (snippet was cut off here): apply filterbanks, Fy^T w Fx
    Fyt=tf.transpose(Fy,perm=[0,2,1])
    wr=tf.batch_matmul(Fyt,tf.batch_matmul(w,Fx))
    wr=tf.reshape(wr,[batch_size,img_size]) # flatten back to image size
    return wr*tf.reshape(1.0/gamma,[-1,1])
def decode(state,input):
    with tf.variable_scope("decoder",reuse=DO_SHARE):
        return lstm_dec(input, state)
def sampleQ(h_enc):
    """
    Samples Zt ~ normrnd(mu,sigma) via reparameterization trick for normal dist
    mu is (batch,z_size)
    """
    with tf.variable_scope("mu",reuse=DO_SHARE):
        mu=linear(h_enc,z_size)
    with tf.variable_scope("sigma",reuse=DO_SHARE):
        logsigma=linear(h_enc,z_size)
        sigma=tf.exp(logsigma)
    # reparameterization: z = mu + sigma*e, e ~ N(0,I); e is assumed defined
    # elsewhere, e.g. e = tf.random_normal((batch_size,z_size), 0, 1)
    return (mu+sigma*e, mu, logsigma, sigma)
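
A natural companion to the reconstruction term Lx above is the latent loss: the KL from Q(z|x) to a unit Gaussian, which has a closed form in mu, sigma, and logsigma. A hedged sketch (this term is not shown in the snippet; the reduction mirrors Lx):

# KL( N(mu, sigma^2) || N(0, I) ) summed over latent dims, then averaged
Lz = tf.reduce_sum(0.5*(tf.square(mu) + tf.square(sigma) - 2*logsigma - 1), 1)
Lz = tf.reduce_mean(Lz)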