Xuechen Li (lxuechen)

lxuechen / adaboost.py
Created November 29, 2016 03:29 — forked from tristanwietsma/adaboost.py
Python implementation of the AdaBoost (Adaptive Boosting) classification algorithm.
from __future__ import division
from numpy import *

class AdaBoost:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        self.weights = ones(self.N)/self.N
        self.RULES = []
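
The preview ends at the constructor. For context, AdaBoost's core loop reweights the training set after each weak rule is added, up-weighting misclassified examples so the next rule focuses on them. A minimal NumPy sketch of that standard update (illustrative only, not code from the gist; rule is a hypothetical +1/-1 classifier and alpha its vote weight):

import numpy as np

def reweight(weights, X, y, rule, alpha):
    # AdaBoost update: w_i <- w_i * exp(-alpha * y_i * h(x_i)), then renormalize.
    margins = np.array([yi * rule(xi) for xi, yi in zip(X, y)])
    new_weights = weights * np.exp(-alpha * margins)
    return new_weights / new_weights.sum()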
lxuechen / pg-pong.py
Created February 8, 2017 17:46 — forked from karpathy/pg-pong.py
Training a Neural Network ATARI Pong agent with Policy Gradients from raw pixels
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
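
The gamma defined above is the discount factor used to turn an episode's per-step rewards into returns; a minimal sketch of that standard computation over a 1-D reward array (the helper name is mine, not necessarily the gist's):

def discount_rewards(r, gamma=0.99):
    # Accumulate a running discounted sum, walking backwards from episode end.
    discounted = np.zeros_like(r, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(r))):
        running_add = running_add * gamma + r[t]
        discounted[t] = running_add
    return discounted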
lxuechen / bayes_by_backprop.py
Created May 16, 2017 21:29 — forked from rocknrollnerd/bayes_by_backprop.py
Theano implementation of Bayes-by-Backprop algorithm from "Weight uncertainty in neural networks" paper
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams
from lasagne.updates import adam
from lasagne.utils import collect_shared_vars
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
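
Bayes by Backprop trains a distribution over weights by sampling them with the reparameterization trick: w = mu + softplus(rho) * eps with eps ~ N(0, I), so gradients reach the variational parameters mu and rho. A minimal NumPy sketch of that sampling step (names are illustrative, not taken from the gist):

import numpy as np

def sample_weights(mu, rho, rng=np.random):
    # softplus keeps the standard deviation positive; eps is standard normal noise.
    sigma = np.log1p(np.exp(rho))
    eps = rng.standard_normal(mu.shape)
    return mu + sigma * eps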
lxuechen / backward.py
Created August 3, 2018 19:32
define backward pass
def backward_grads(self, y, dy, training=True):
    dy1, dy2 = dy
    y1, y2 = y

    with tf.GradientTape() as gtape:
        gtape.watch(y1)
        gy1 = self.g(y1, training=training)
    grads_combined = gtape.gradient(
        gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
    dg = grads_combined[1:]
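
The preview stops mid-function. This is the g-branch half of a reversible-block (RevNet-style) backward pass: g(y1) is recomputed under the tape so activations need not be stored. A hedged sketch of how such a pass typically continues with the mirrored f-branch step (illustrative, not the gist's actual code; assumes a companion self.f):

    # Hypothetical continuation, mirroring the g-branch step for the f branch.
    dx1 = dy1 + grads_combined[0]  # add gradient flowing back through g into y1
    x2 = y2 - gy1                  # invert the coupling y2 = x2 + g(y1)
    with tf.GradientTape() as ftape:
        ftape.watch(x2)
        fx2 = self.f(x2, training=training)
    grads_combined = ftape.gradient(
        fx2, [x2] + self.f.trainable_variables, output_gradients=dx1)
    df = grads_combined[1:]
    dx2 = dy2 + grads_combined[0]
    x1 = y1 - fx2                  # invert the coupling y1 = x1 + f(x2)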
lxuechen / init_checkpoint.py
Created August 3, 2018 19:37
initialize checkpoint
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer,
                                 learning_rate=learning_rate, global_step=global_step)
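
The keyword names given to tf.train.Checkpoint (model, optimizer, and so on) are the names the objects are tracked and saved under, so a restoring Checkpoint must be built with matching names. A minimal sketch, assuming the same objects are reconstructed elsewhere and a hypothetical checkpoint directory:

restored = tf.train.Checkpoint(model=model, optimizer=optimizer,
                               learning_rate=learning_rate, global_step=global_step)
restored.restore(tf.train.latest_checkpoint('/tmp/ckpt'))  # '/tmp/ckpt' is hypothetical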
lxuechen / save_and_restore.py
Created August 3, 2018 19:38
save and restore checkpoint
checkpoint.save(file_prefix)
checkpoint.restore(save_path)
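
save returns the path it wrote, and restore returns a status object that can verify the checkpoint was fully matched; a minimal sketch with a hypothetical prefix:

save_path = checkpoint.save(file_prefix='/tmp/ckpt/model')  # hypothetical prefix
status = checkpoint.restore(save_path)
status.assert_consumed()  # raises if any saved value was left unmatched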
lxuechen / defun.py
Created August 3, 2018 19:39
general defun
tfe = tf.contrib.eager
model.call = tfe.defun(model.call)
model.compute_gradients = tfe.defun(model.compute_gradients)
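
defun traces the wrapped Python function into a TensorFlow graph on its first call (once per input signature) and executes the compiled graph afterwards, which usually speeds up eager code. A self-contained sketch, assuming TF 1.x with eager execution enabled:

import tensorflow as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager

def dense_relu(x, w, b):
    return tf.nn.relu(tf.matmul(x, w) + b)

dense_relu = tfe.defun(dense_relu)  # traced once, then run as a graph function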
lxuechen / defun_optim.py
Created August 3, 2018 19:39
defun optimizer
def apply_gradients(optimizer, gradients, variables, global_step=None):
    optimizer.apply_gradients(
        zip(gradients, variables), global_step=global_step)

apply_gradients = tfe.defun(apply_gradients)
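
A usage sketch for the wrapped update, assuming grads were produced by a tf.GradientTape over the model's variables (tape, loss, and model are assumptions here, not from the gist):

grads = tape.gradient(loss, model.trainable_variables)
apply_gradients(optimizer, grads, model.trainable_variables,
                global_step=tf.train.get_or_create_global_step())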
lxuechen / init_ds.py
Created August 3, 2018 19:40
initialize dataset object
dataset = tf.data.TFRecordDataset(filename)
dataset = dataset.repeat(epochs).map(parser).batch(batch_size)
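
parser is whatever per-record decoding function the TFRecords require; a hedged sketch for records holding a raw image string and an integer label (this feature layout is an assumption, not taken from the gist):

def parser(serialized):
    # Hypothetical feature spec; adjust keys and dtypes to the actual records.
    features = tf.parse_single_example(
        serialized,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image'], tf.uint8)
    label = tf.cast(features['label'], tf.int32)
    return image, label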
lxuechen / loop_ds.py
Created August 3, 2018 19:41
simple looping over dataset in eager
for image, label in dataset:
    logits = model(image, training=True)
    ...
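
Putting the pieces together, a minimal end-to-end eager training step might look like this, assuming the model, optimizer, and dataset from the snippets above and a sparse-label classification loss:

for image, label in dataset:
    with tf.GradientTape() as tape:
        logits = model(image, training=True)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(
        zip(grads, model.trainable_variables),
        global_step=tf.train.get_or_create_global_step())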