@mrgloom
mrgloom / dnn_compare_optims.py
Last active February 9, 2018 18:04 — forked from syhw/dnn_compare_optims.py
comparing SGD vs SAG vs Adadelta vs Adagrad
"""
A deep neural network with or without dropout in one file.
"""
import numpy
import theano
import sys
import math
from theano import tensor as T
from theano import shared
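The preview shows only the docstring and imports; for reference, a minimal NumPy sketch of the Adagrad update, one of the optimizers the gist compares (illustrative only, not the gist's Theano implementation):

import numpy as np

def adagrad_step(param, grad, accum, lr=0.01, eps=1e-6):
    # accumulate squared gradients, then scale each parameter's step
    # by the inverse root of its accumulated history
    accum += grad ** 2
    param -= lr * grad / (np.sqrt(accum) + eps)
    return param, accum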
@mrgloom
mrgloom / Convolutional Arithmetic.ipynb
Created May 12, 2017 16:44 — forked from akiross/Convolutional Arithmetic.ipynb
A few experiments on how convolution and transposed convolution (deconvolution) should work in TensorFlow.
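The notebook's contents are not shown here; as a stand-in for the idea its description names, a minimal TensorFlow sketch of a strided convolution and the transposed convolution that reverses its shape change (the shapes and setup are assumptions about a typical experiment, not the notebook's contents):

import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])   # NHWC input
k = tf.random.normal([3, 3, 3, 16])  # HWIO kernel

# strided convolution: 8x8 -> 4x4
y = tf.nn.conv2d(x, k, strides=[1, 2, 2, 1], padding="SAME")

# transposed convolution with the same kernel maps 4x4 back to 8x8
x_up = tf.nn.conv2d_transpose(y, k, output_shape=[1, 8, 8, 3],
                              strides=[1, 2, 2, 1], padding="SAME")
print(y.shape, x_up.shape)  # (1, 4, 4, 16) (1, 8, 8, 3)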
@mrgloom
mrgloom / online_stats.py
Created March 15, 2017 15:12 — forked from kastnerkyle/online_stats.py
Online statistics in numpy
# Author: Kyle Kaster
# License: BSD 3-clause
import numpy as np
def online_stats(X):
"""
Converted from John D. Cook
http://www.johndcook.com/blog/standard_deviation/
"""
@mrgloom
mrgloom / detect_multiscale.cpp
Created May 23, 2016 13:20 — forked from thorikawa/detect_multiscale.cpp
Simple example for CascadeClassifier.detectMultiScale
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
int main() {
    Mat img = imread("lena.jpg");
    CascadeClassifier cascade;
    if (cascade.load("haarcascade_frontalface_alt.xml")) {
        // detect faces at multiple scales and draw a box around each
        vector<Rect> faces;
        cascade.detectMultiScale(img, faces);
        for (const Rect& r : faces) rectangle(img, r, Scalar(0, 255, 0), 2);
        imwrite("result.jpg", img);
    }
    return 0;
}
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import fetch_mldata

def main():
    mnist = fetch_mldata("MNIST original")
    X_all, y_all = mnist.data / 255., mnist.target
    print("scaling")
    X = X_all[:60000, :]
    y = y_all[:60000]
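The preview stops before the search itself; a minimal sketch of how such a grid search might continue, using the old-style sklearn API the snippet imports (the SVC estimator and the parameter grid are assumptions, not the original gist's choices):

from sklearn.svm import SVC

def run_search(X, y):
    # hypothetical grid; the original gist's parameters are not shown
    param_grid = {"C": [0.1, 1, 10], "gamma": [0.01, 0.1]}
    cv = StratifiedKFold(y, n_folds=3)  # old cross_validation API
    search = GridSearchCV(SVC(), param_grid, cv=cv, n_jobs=-1)
    search.fit(X, y)
    print(search.best_params_, search.best_score_)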
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from sklearn.decomposition import RandomizedPCA
from sklearn.datasets import fetch_mldata
from sklearn.utils import shuffle
mnist = fetch_mldata("MNIST original")
X_train, y_train = mnist.data[:60000] / 255., mnist.target[:60000]
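The preview ends after loading the data; a minimal sketch of the projection and plot such a script typically builds (the component count and plotting style are assumptions):

pca = RandomizedPCA(n_components=2)
X_2d = pca.fit_transform(X_train)
# scatter the 2-D projection, one color per digit class
for digit in range(10):
    mask = y_train == digit
    plt.scatter(X_2d[mask, 0], X_2d[mask, 1], s=1, label=str(digit))
plt.legend()
plt.show()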
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import fmin_l_bfgs_b as bfgs
from scipy.io import loadmat
class params:
    '''
    A wrapper around weights and biases
    for an autoencoder.
    '''
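fmin_l_bfgs_b optimizes a single flat parameter vector, so a wrapper like this usually packs and unpacks the weights; a minimal sketch under that assumption (the layer shapes and helper names are illustrative, not the gist's interface):

def pack(W1, b1, W2, b2):
    # flatten all parameters into the one vector bfgs expects
    return np.concatenate([p.ravel() for p in (W1, b1, W2, b2)])

def unpack(theta, n_vis, n_hid):
    # slice the flat vector back into weight matrices and biases
    i = 0
    W1 = theta[i:i + n_hid * n_vis].reshape(n_hid, n_vis); i += n_hid * n_vis
    b1 = theta[i:i + n_hid]; i += n_hid
    W2 = theta[i:i + n_vis * n_hid].reshape(n_vis, n_hid); i += n_vis * n_hid
    b2 = theta[i:i + n_vis]
    return W1, b1, W2, b2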
from numpy import loadtxt, zeros, ones, array, linspace, logspace
from pylab import scatter, show, title, xlabel, ylabel, plot, contour

# Evaluate the linear regression
def compute_cost(X, y, theta):
    '''
    Compute cost for linear regression
    '''
    # Number of training samples
    m = y.size
    predictions = X.dot(theta).flatten()
    sq_errors = (predictions - y) ** 2
    return (1.0 / (2 * m)) * sq_errors.sum()
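A cost function like this is normally paired with batch gradient descent; a minimal sketch under that assumption (the learning rate alpha and iteration count are illustrative):

def gradient_descent(X, y, theta, alpha, num_iters):
    m = y.size
    for _ in range(num_iters):
        errors = X.dot(theta).flatten() - y
        # simultaneous update of all parameters along the negative gradient
        theta = theta - (alpha / m) * X.T.dot(errors).reshape(theta.shape)
    return theta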
@mrgloom
mrgloom / pegasos.py
Created October 13, 2015 16:58 — forked from alextp/pegasos.py
import numpy as np

class OnlineLearner(object):
    def __init__(self, **kwargs):
        self.last_misses = 0.
        self.iratio = 0.
        self.it = 1.
        self.l = kwargs["l"]
        self.max_ratio = -np.inf
        self.threshold = 500.

    def hinge_loss(self, vector, cls, weight):
        # standard hinge loss: zero once the example clears the margin
        return max(0.0, 1.0 - cls * np.dot(vector, weight))
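Pegasos minimizes the regularized hinge loss by sub-gradient SGD; a minimal sketch of one step under the usual formulation (this is the textbook update, not necessarily the gist's exact code):

def pegasos_step(weight, vector, cls, l, t):
    eta = 1.0 / (l * t)  # learning-rate schedule eta_t = 1 / (lambda * t)
    if cls * np.dot(vector, weight) < 1.0:
        # margin violated: shrink the weights and step toward the example
        return (1.0 - eta * l) * weight + eta * cls * vector
    # margin satisfied: only the regularization shrinkage applies
    return (1.0 - eta * l) * weight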
@mrgloom
mrgloom / rbm.py
Last active September 11, 2015 09:59
Some fairly clean (and fast) code for Restricted Boltzmann machines.
"""
Code for training RBMs with contrastive divergence. Tries to be as
quick and memory-efficient as possible while utilizing only pure Python
and NumPy.
"""
# Copyright (c) 2009, David Warde-Farley
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
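The preview ends inside the license header; for reference, a minimal NumPy sketch of the CD-1 update the docstring refers to (array layout, learning rate, and function names are assumptions, not Warde-Farley's implementation):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_update(v0, W, b_vis, b_hid, lr=0.1, rng=np.random):
    # positive phase: hidden activations driven by the data
    h0_prob = sigmoid(v0 @ W + b_hid)
    h0 = (rng.uniform(size=h0_prob.shape) < h0_prob).astype(v0.dtype)
    # negative phase: one Gibbs step back to visible, then to hidden
    v1_prob = sigmoid(h0 @ W.T + b_vis)
    h1_prob = sigmoid(v1_prob @ W + b_hid)
    # contrastive-divergence gradient estimate, averaged over the batch
    W += lr * (v0.T @ h0_prob - v1_prob.T @ h1_prob) / v0.shape[0]
    b_vis += lr * (v0 - v1_prob).mean(axis=0)
    b_hid += lr * (h0_prob - h1_prob).mean(axis=0)
    return W, b_vis, b_hid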