Xuechen Li (lxuechen)
@lxuechen
lxuechen / adaboost.py
Created November 29, 2016 03:29 — forked from tristanwietsma/adaboost.py
Python implementation of the AdaBoost (Adaptive Boosting) classification algorithm.
from __future__ import division
import numpy as np


class AdaBoost:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        # Start from uniform example weights; boosting later shifts weight
        # toward the examples each weak learner gets wrong.
        self.weights = np.ones(self.N) / self.N
        self.RULES = []
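
The preview stops at the constructor. The core of AdaBoost is the per-round reweighting that the uniform weights above feed into; a minimal standalone sketch of one round (the boost_step helper and its (x, label) pair format are illustrative, not the gist's API):

import numpy as np

def boost_step(weights, training_set, weak_learner):
    # One AdaBoost round (illustrative helper, not the gist's API).
    # training_set: list of (x, label) pairs with labels in {-1, +1};
    # weak_learner: callable mapping x to a predicted label in {-1, +1}.
    errors = np.array([label != weak_learner(x) for x, label in training_set])
    eps = float((errors * weights).sum())   # weighted error rate, assumed in (0, 0.5)
    alpha = 0.5 * np.log((1 - eps) / eps)   # the learner's vote weight
    # Upweight mistakes, downweight correct answers, then renormalize.
    new_weights = weights * np.exp(alpha * np.where(errors, 1.0, -1.0))
    return new_weights / new_weights.sum(), alpha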
@lxuechen
lxuechen / pg-pong.py
Created February 8, 2017 17:46 — forked from karpathy/pg-pong.py
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
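
The gamma hyperparameter feeds the discounted-return computation that turns each episode's reward sequence into per-step returns for the policy gradient. A minimal sketch of that step (written fresh here, in the spirit of the full gist; the reset at nonzero rewards is Pong-specific, since a nonzero reward ends a point):

import numpy as np

def discount_rewards(rewards, gamma=0.99):
    # Work right-to-left so each step accumulates its discounted future sum.
    discounted = np.zeros_like(rewards, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        if rewards[t] != 0:
            running = 0.0  # a nonzero reward ends a Pong point; reset the sum
        running = running * gamma + rewards[t]
        discounted[t] = running
    return discounted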
@lxuechen
lxuechen / bayes_by_backprop.py
Created May 16, 2017 21:29 — forked from rocknrollnerd/bayes_by_backprop.py
Theano implementation of the Bayes-by-Backprop algorithm from the "Weight Uncertainty in Neural Networks" paper
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams
from lasagne.updates import adam
from lasagne.utils import collect_shared_vars

from sklearn.datasets import fetch_mldata  # removed in modern scikit-learn; fetch_openml is its successor
from sklearn.model_selection import train_test_split  # was sklearn.cross_validation in the original gist
from sklearn import preprocessing
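
The imports hint at the core trick of Bayes-by-Backprop: each weight is sampled via the reparameterization w = mu + softplus(rho) * eps, so gradients can flow to the variational parameters mu and rho. A minimal Theano sketch of that sampling step (the names, shapes, and initial values are illustrative, not the gist's):

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1)

# Variational parameters for one weight matrix (shapes are illustrative).
mu = theano.shared(np.zeros((784, 200), dtype=theano.config.floatX), name='mu')
rho = theano.shared(np.full((784, 200), -3.0, dtype=theano.config.floatX), name='rho')

# Reparameterization trick: softplus(rho) keeps the scale positive, and the
# noise eps is independent of the learned parameters mu and rho.
eps = srng.normal(size=(784, 200))
w = mu + T.nnet.softplus(rho) * eps  # a fresh weight sample per forward pass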
@lxuechen
lxuechen / residual.py
Last active August 3, 2018 19:42
define reversible residual block
import tensorflow as tf

class Residual(tf.keras.Model):
    """Reversible block: y1 = x1 + f(x2), y2 = x2 + g(y1) on split channels."""

    def __init__(self, filters):
        super(Residual, self).__init__()
        self.axis = 1  # channel axis; assumes NCHW inputs as in the unit test below
        self.f = ResidualInner(filters=filters, strides=(1, 1))  # defined elsewhere in the gist
        self.g = ResidualInner(filters=filters, strides=(1, 1))

    def call(self, x, training=True):
        x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis)
        f_x2 = self.f(x2, training=training)
        y1 = f_x2 + x1
        g_y1 = self.g(y1, training=training)
        y2 = g_y1 + x2
        return tf.concat([y1, y2], axis=self.axis)
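
What makes this coupling useful is that it is exactly invertible: the inputs can be recomputed from the outputs, so activations need not be stored for backprop. A hypothetical reverse method (not in the gist preview) that could sit alongside call:

    def reverse(self, y, training=True):
        # Exact inversion of call(): recover the block input from its output.
        y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)
        x2 = y2 - self.g(y1, training=training)  # invert y2 = x2 + g(y1)
        x1 = y1 - self.f(x2, training=training)  # invert y1 = x1 + f(x2)
        return tf.concat([x1, x2], axis=self.axis)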
@lxuechen
lxuechen / backward.py
Created August 3, 2018 19:32
define backward pass
def backward_grads(self, y, dy, training=True):
    # Recompute, rather than store, intermediate activations: invert the
    # coupling from `call` to recover inputs and gradients from the outputs.
    dy1, dy2 = dy
    y1, y2 = y
    with tf.GradientTape() as gtape:
        gtape.watch(y1)
        gy1 = self.g(y1, training=training)
    # Gradient of g's output w.r.t. its input and variables, seeded with dy2.
    grads_combined = gtape.gradient(
        gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
    dg = grads_combined[1:]  # gradients for g's variables
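
The preview cuts off after the g branch. Given the forward coupling y1 = x1 + f(x2) and y2 = x2 + g(y1), the rest of the method is forced by symmetry; a hedged sketch of the continuation (the actual gist may package the return values differently):

    dx1 = dy1 + grads_combined[0]  # total gradient flowing into y1, hence into x1
    x2 = y2 - gy1                  # reconstruct activation x2 instead of storing it
    with tf.GradientTape() as ftape:
        ftape.watch(x2)
        fx2 = self.f(x2, training=training)
    grads_combined = ftape.gradient(
        fx2, [x2] + self.f.trainable_variables, output_gradients=dx1)
    df = grads_combined[1:]        # gradients for f's variables
    dx2 = dy2 + grads_combined[0]  # total gradient flowing into x2
    x1 = y1 - fx2                  # reconstruct activation x1
    return (x1, x2), (dx1, dx2), df + dg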
@lxuechen
lxuechen / unit_test.py
Last active August 7, 2018 23:04
fast prototyping with eager
# Shapes for a quick smoke test; the concrete values are arbitrary.
N, C, H, W = 4, 16, 8, 8
block = Residual(filters=C)  # filters assumed to match the channel count
x = tf.random_normal(shape=(N, C, H, W))
dy = tf.random_normal(shape=(N, C, H, W))
with tf.GradientTape() as tape:
    tape.watch(x)
    y = block(x)
# Compute true grads
dx_true = tape.gradient(y, x, output_gradients=dy)
# Compute grads from reconstruction
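
The preview ends before the actual comparison. A hedged completion, assuming backward_grads returns values as in the sketch above:

y1, y2 = tf.split(y, num_or_size_splits=2, axis=1)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=1)
_, (dx1, dx2), _ = block.backward_grads(y=(y1, y2), dy=(dy1, dy2))
dx = tf.concat([dx1, dx2], axis=1)
# The reconstruction should agree with ordinary backprop up to float error.
print(tf.reduce_max(tf.abs(dx_true - dx)))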
@lxuechen
lxuechen / init_checkpoint.py
Created August 3, 2018 19:37
initialize checkpoint
# One Checkpoint object tracks everything that should be saved and restored together.
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer,
                                 learning_rate=learning_rate, global_step=global_step)
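
tf.train.Checkpoint saves whatever trackable objects are attached to it by keyword. A sketch of how the four objects above might be constructed in TF 1.x eager mode (a minimal stand-in, not the gist's actual model):

import tensorflow as tf
tf.enable_eager_execution()

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])  # stand-in model
learning_rate = tf.Variable(1e-3, trainable=False)  # a Variable, so its value is tracked
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
global_step = tf.train.get_or_create_global_step()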
@lxuechen
lxuechen / save_and_restore.py
Created August 3, 2018 19:38
save and restore checkpoint
checkpoint.save(file_prefix)   # writes e.g. <file_prefix>-1; the counter increments per save
checkpoint.restore(save_path)  # restores tracked objects, matched by the keywords given above
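
In practice the restore path is usually discovered rather than hard-coded; a small usage sketch (the directory name is illustrative):

import os
import tensorflow as tf

checkpoint_dir = '/tmp/ckpts'  # illustrative location
save_path = checkpoint.save(os.path.join(checkpoint_dir, 'ckpt'))  # e.g. /tmp/ckpts/ckpt-1
# Later, or in a fresh process: find and restore the most recent save.
latest = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(latest)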
@lxuechen
lxuechen / defun.py
Created August 3, 2018 19:39
general defun
tfe = tf.contrib.eager
# Compile the eager-mode Python methods into graph functions for speed;
# tracing happens on the first call, and later calls reuse the cached graph.
model.call = tfe.defun(model.call)
model.compute_gradients = tfe.defun(model.compute_gradients)
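
A minimal end-to-end sketch of the same pattern with a toy model (the model and shapes are illustrative):

import tensorflow as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model.call = tfe.defun(model.call)

x = tf.random_normal((32, 100))
y = model(x)  # first call traces model.call into a graph
y = model(x)  # later calls with the same shapes/dtypes reuse the cached graph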
@lxuechen
lxuechen / defun_optim.py
Created August 3, 2018 19:39
defun optimizer
def apply_gradients(optimizer, gradients, variables, global_step=None):
    # Wrapping the update in a plain function gives defun one unit to compile.
    optimizer.apply_gradients(
        zip(gradients, variables), global_step=global_step)

apply_gradients = tfe.defun(apply_gradients)
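
A hedged sketch of the call site inside a training step (the inputs and loss choice are placeholders for illustration):

def train_step(model, optimizer, images, labels, global_step):
    with tf.GradientTape() as tape:
        logits = model(images)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    gradients = tape.gradient(loss, model.variables)
    # The defun-compiled apply_gradients above runs the update as one graph call.
    apply_gradients(optimizer, gradients, model.variables, global_step)
    return loss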