Kyle Kastner (kastnerkyle)

@kastnerkyle
kastnerkyle / kmeans_coder.py
Last active December 31, 2015 06:29
Updated KMeansCoder from @vene for sklearn 0.14
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# James Bergstra <james.bergstra@umontreal.ca>
# Vlad Niculae <vlad@vene.ro>
#
# License: BSD 3 Clause
# Updated to sklearn 0.14 by Kyle Kastner <kastnerkyle@gmail.com>
import numpy as np
from sklearn.decomposition import PCA
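
For context, a minimal sketch of the k-means coding idea (learn centroids, then encode by distance to each centroid) in the same scikit-learn transformer style; the class and parameter names here are illustrative, not the gist's actual API:
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import pairwise_distances

class SimpleKMeansCoder(BaseEstimator, TransformerMixin):
    def __init__(self, n_atoms=100, random_state=1999):
        self.n_atoms = n_atoms
        self.random_state = random_state

    def fit(self, X, y=None):
        # Learn a dictionary of centroids with minibatch k-means
        km = MiniBatchKMeans(n_clusters=self.n_atoms,
                             random_state=self.random_state)
        km.fit(X)
        self.components_ = km.cluster_centers_
        return self

    def transform(self, X):
        # "Triangle" activation: how far below the mean distance each
        # centroid lies, clipped at zero (Coates & Ng style encoding)
        D = pairwise_distances(X, self.components_)
        return np.maximum(0, D.mean(axis=1, keepdims=True) - D)
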
@kastnerkyle
kastnerkyle / mnist_hinge.py
Created December 16, 2013 21:22
Test file for HingeLoss layer
#!/usr/bin/env python
from pylearn2.models import mlp
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import MonitorBased
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.datasets import mnist
from pylearn2.train import Train
from pylearn2.train_extensions import best_params, window_flip
from pylearn2.space import VectorSpace
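
This gist is pylearn2-specific, but as a rough illustration of what a hinge-loss layer computes, here is a small numpy sketch of the multiclass (Crammer-Singer style) hinge loss; the function and names are illustrative only:
import numpy as np

def multiclass_hinge(scores, targets, margin=1.0):
    # scores: (n_samples, n_classes), targets: integer class labels
    n = scores.shape[0]
    correct = scores[np.arange(n), targets][:, None]
    margins = np.maximum(0, scores - correct + margin)
    margins[np.arange(n), targets] = 0
    return margins.sum(axis=1).mean()

scores = np.array([[2.0, 0.5, -1.0], [0.1, 0.3, 0.2]])
print(multiclass_hinge(scores, np.array([0, 2])))
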
@kastnerkyle
kastnerkyle / coders.py
Last active January 1, 2016 12:09
Home to all coders, with plans for KMeansCoder, RandomCoder, and KSVD (maybe others as well)
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# James Bergstra <james.bergstra@umontreal.ca>
# Vlad Niculae <vlad@vene.ro>
# Kyle Kastner <kastnerkyle@gmail.com>
# Samantha Massengill <sgmassengill@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from sklearn.decomposition import PCA
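
A hedged sketch of the RandomCoder idea mentioned in the description: sample normalized training points as dictionary atoms, then encode new data by correlation with them. The helper names are illustrative, not the gist's API:
import numpy as np

def fit_random_dictionary(X, n_atoms=50, random_state=1999):
    # Pick n_atoms rows of X at random and normalize them as atoms
    rng = np.random.RandomState(random_state)
    idx = rng.choice(len(X), size=n_atoms, replace=False)
    D = X[idx]
    return D / (np.linalg.norm(D, axis=1, keepdims=True) + 1e-8)

def encode(X, D):
    # Code each sample by its dot product with every atom
    return X @ D.T
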
@kastnerkyle
kastnerkyle / matrix_factorization.py
Last active March 27, 2021 12:58
Matrix factorization code related to matrix completion
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
import numpy as np
from scipy import sparse
def minibatch_indices(X, minibatch_size):
    minibatch_indices = np.arange(0, len(X), minibatch_size)
    minibatch_indices = np.asarray(list(minibatch_indices) + [len(X)])
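
Not the gist's code, but a minimal sketch of matrix completion via alternating least squares on the observed entries, assuming a 0/1 mask M marks which values of X are known:
import numpy as np

def als_complete(X, M, rank=5, n_iter=50, lam=0.1, random_state=1999):
    rng = np.random.RandomState(random_state)
    n, m = X.shape
    U = rng.randn(n, rank)
    V = rng.randn(m, rank)
    I = lam * np.eye(rank)
    for _ in range(n_iter):
        # Solve a small ridge problem per row/column over observed entries
        for i in range(n):
            cols = M[i] > 0
            U[i] = np.linalg.solve(V[cols].T @ V[cols] + I,
                                   V[cols].T @ X[i, cols])
        for j in range(m):
            rows = M[:, j] > 0
            V[j] = np.linalg.solve(U[rows].T @ U[rows] + I,
                                   U[rows].T @ X[rows, j])
    return U @ V.T
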
@kastnerkyle
kastnerkyle / hadamard.py
Last active August 29, 2015 13:57
Hadamard matrix and basis plots
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
# This code is for fun only! Use scipy.linalg.hadamard
import numpy as np
import matplotlib.pyplot as plt
import functools
def memoize(obj):
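
The comment above already points real use at scipy.linalg.hadamard; as a sketch of the underlying idea, the Sylvester construction doubles the matrix size by tiling (the function name is illustrative):
import numpy as np

def sylvester_hadamard(n):
    # n must be a power of two
    H = np.array([[1]])
    while H.shape[0] < n:
        H = np.block([[H, H], [H, -H]])
    return H

print(sylvester_hadamard(4))
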
@kastnerkyle
kastnerkyle / preproc.py
Last active June 28, 2023 19:52
General preprocessing transforms in a scikit-learn compatible format
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import gen_batches
from scipy.linalg import eigh
from scipy.linalg import svd
import numpy as np
# From sklearn master
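
Not the gist's code, but one common transform in this scikit-learn compatible style is ZCA whitening; a minimal sketch, with illustrative class and parameter names:
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin

class ZCAWhitener(BaseEstimator, TransformerMixin):
    def __init__(self, eps=1e-5):
        self.eps = eps

    def fit(self, X, y=None):
        # Symmetric (ZCA) whitening matrix from the eigendecomposition
        # of the covariance of the centered data
        self.mean_ = X.mean(axis=0)
        Xc = X - self.mean_
        cov = np.dot(Xc.T, Xc) / len(X)
        w, V = np.linalg.eigh(cov)
        self.whiten_ = V @ np.diag(1.0 / np.sqrt(w + self.eps)) @ V.T
        return self

    def transform(self, X):
        return (X - self.mean_) @ self.whiten_
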
@kastnerkyle
kastnerkyle / dhmm.py
Last active September 4, 2019 18:59
Discrete HMM implementation - for learning purposes only!
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
import numpy as np
class dhmm:
    def __init__(self, n_states, initial_prob=None,
                 n_iter=100, random_seed=1999):
        # Initial state probabilities p(s_0) = pi[s_0].
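
Not the gist's implementation, but a minimal sketch of the scaled forward pass a discrete HMM uses to score a sequence, assuming a transition matrix A, emission matrix B, initial distribution pi, and integer observations obs:
import numpy as np

def forward_loglik(pi, A, B, obs):
    # alpha_t(s) ~ p(o_1..o_t, s_t = s), renormalized each step for stability
    alpha = pi * B[:, obs[0]]
    log_lik = np.log(alpha.sum())
    alpha /= alpha.sum()
    for o in obs[1:]:
        alpha = (alpha @ A) * B[:, o]
        log_lik += np.log(alpha.sum())
        alpha /= alpha.sum()
    return log_lik
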
@kastnerkyle
kastnerkyle / gmmhmm.py
Last active March 9, 2023 06:14
GMM-HMM (hidden Markov model with Gaussian mixture emissions) implementation for speech recognition and other uses
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
import scipy.stats as st
import numpy as np
class gmmhmm:
    # This class was converted, with modifications, from
    # https://code.google.com/p/hmm-speech-recognition/source/browse/Word.m
    def __init__(self, n_states):
        self.n_states = n_states
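
Not the gist's code, but a small sketch of the emission side of such a model: score each frame under a per-state Gaussian with scipy.stats (mixture weights and the EM updates are omitted, and the names are illustrative):
import numpy as np
import scipy.stats as st

def emission_loglik(X, means, covs):
    # X: (n_frames, n_dim); means/covs hold one Gaussian per hidden state
    return np.column_stack([
        st.multivariate_normal.logpdf(X, mean=m, cov=c)
        for m, c in zip(means, covs)])
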
@kastnerkyle
kastnerkyle / procrustes_rotation.py
Last active August 29, 2015 14:02
Procrustes rotation for minimizing RMSE between two differently scaled/rotated matrices
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from sklearn.utils import array2d, as_float_array
from sklearn.utils.extmath import svd_flip
from sklearn.utils.testing import assert_array_almost_equal
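
Not the gist's code, but the core of an orthogonal Procrustes fit is a single SVD: find the rotation R (and scale) minimizing ||A - scale * B @ R||_F. The function name is illustrative:
import numpy as np
from scipy import linalg

def procrustes_fit(A, B):
    # SVD of B.T @ A gives the optimal rotation; the singular values
    # give the optimal isotropic scale
    U, s, Vt = linalg.svd(B.T @ A)
    R = U @ Vt
    scale = s.sum() / (B ** 2).sum()
    return R, scale
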
@kastnerkyle
kastnerkyle / streaming_variance.py
Last active August 29, 2015 14:03
Naive examples of streaming variance calculations
# (C) Kyle Kastner, June 2014
# License: BSD 3 clause
import numpy as np
# Using data from http://www.mathsisfun.com/data/standard-deviation.html
X = np.array([600, 470, 170, 430, 300])
# Showing steps from basic to Welford's and batch
# See http://cpsc.yale.edu/sites/default/files/files/tr222.pdf
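
Not the gist's code, but a minimal sketch of the Welford update the comments above refer to, accumulating the mean and the sum of squared deviations (M2) one sample at a time:
import numpy as np

def welford_variance(xs):
    mean, M2 = 0.0, 0.0
    for n, x in enumerate(xs, start=1):
        delta = x - mean
        mean += delta / n
        M2 += delta * (x - mean)
    return mean, M2 / n  # population variance

X = np.array([600, 470, 170, 430, 300])
print(welford_variance(X), X.var())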