Mehdi Cherti (mehdidc)
from nolearn.lasagne import NeuralNet, BatchIterator
from lasagne import layers, nonlinearities, updates, init, objectives
import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100, criterion='valid_loss',
                 criterion_smaller_is_better=True):
        self.patience = patience
        self.criterion = criterion
        self.sign = 1 if criterion_smaller_is_better else -1  # internally, smaller is always better
        self.best, self.best_epoch = np.inf, 0

    def __call__(self, nn, train_history):  # invoked by nolearn after each epoch
        value = self.sign * train_history[-1][self.criterion]
        epoch = train_history[-1]['epoch']
        if value < self.best:
            self.best, self.best_epoch = value, epoch
        elif epoch > self.best_epoch + self.patience:
            raise StopIteration()  # nolearn stops training when this is raised
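A minimal sketch of plugging this into a nolearn net; the layer sizes and hyperparameters below are placeholders, not values from the gist:

net = NeuralNet(
    layers=[('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer)],
    input_shape=(None, 20),
    hidden_num_units=50,
    output_num_units=3,
    output_nonlinearity=nonlinearities.softmax,
    update=updates.adam,
    update_learning_rate=1e-3,
    max_epochs=1000,
    on_epoch_finished=[EarlyStopping(patience=50)],  # stop when valid_loss stalls
)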
from nolearn.lasagne import NeuralNet, BatchIterator
from lasagne import layers, nonlinearities, updates, init, objectives
from nolearn.lasagne.base import objective
from lasagne.objectives import aggregate
from lasagne.regularization import regularize_layer_params, l2, l1
import numpy as np
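These imports point at nolearn's default objective extended with Lasagne's L1/L2 regularizers; a minimal sketch of such a loss, where regularized_objective and the lambda1/lambda2 knobs are assumed names:

def regularized_objective(layers_, lambda1=0.0, lambda2=0.0, *args, **kwargs):
    # start from nolearn's default loss, then add L1/L2 penalties
    # over all layer parameters
    loss = objective(layers_, *args, **kwargs)
    loss += lambda1 * regularize_layer_params(list(layers_.values()), l1)
    loss += lambda2 * regularize_layer_params(list(layers_.values()), l2)
    return loss

A net would then select it via NeuralNet(..., objective=regularized_objective, objective_lambda2=1e-4), using nolearn's objective_* keyword routing.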
from keras.layers import Dense, Input, Dropout
from keras.models import Sequential
from keras.optimizers import Adadelta
from sklearn.datasets import make_blobs
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
from keras.regularizers import l2, l1
import matplotlib.pyplot as plt
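A minimal usage sketch tying these imports together; the dataset sizes, layer widths, filenames, and the decay schedule are placeholders, and the calls follow the Keras 1.x API of the time:

X, y = make_blobs(n_samples=1000, centers=3, n_features=2, random_state=0)
Y = to_categorical(y, 3)

model = Sequential()
model.add(Dense(64, input_dim=2, activation='relu', W_regularizer=l2(1e-4)))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer=Adadelta(), loss='categorical_crossentropy',
              metrics=['accuracy'])

callbacks = [EarlyStopping(monitor='val_loss', patience=10),
             ModelCheckpoint('best_model.h5', save_best_only=True),
             LearningRateScheduler(lambda epoch: 1.0 * 0.95 ** epoch)]
history = model.fit(X, Y, validation_split=0.2, nb_epoch=100,
                    callbacks=callbacks)

plt.plot(history.history['val_loss'])  # inspect the validation curve
plt.show()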
from skopt import gp_minimize, forest_minimize, dummy_minimize, gbrt_minimize
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
from skopt.benchmarks import branin

x0 = [[1, 2], [3, 4], [5, 6]]
y0 = list(map(branin, x0))  # list(), not a bare map: skopt needs concrete values
res = gp_minimize(branin,                       # function to minimize
                  [(-5.0, 10.0), (0.0, 15.0)],  # branin's standard search space
                  x0=x0, y0=y0,                 # warm-start from the points above
                  n_calls=30)                   # arguments filled in; the preview was truncated
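The GaussianProcessRegressor and Matern imports suggest a custom surrogate; one way to pass one in, a sketch assuming skopt's base_estimator argument:

gpr = GaussianProcessRegressor(kernel=Matern(nu=2.5), normalize_y=True)
res_gp = gp_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)],
                     base_estimator=gpr, n_calls=30)

# The other imported minimizers share the same interface:
res_forest = forest_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=30)
res_random = dummy_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=30)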
import numpy as np

def minibatcher(fn, batchsize=1000):
    """
    fn : a function that takes an input and returns an output
    batchsize : divide the total input into chunks of size batchsize at most,
        iterate through all the chunks, call fn on each, get the results,
        then concatenate all the results.
    """
    def f(X):
        results = []
        for i in range(0, len(X), batchsize):
            results.append(fn(X[i:i + batchsize]))
        return np.concatenate(results, axis=0)
    return f
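A toy usage example; the squaring function is just an illustration:

square = minibatcher(lambda x: x ** 2, batchsize=3)
print(square(np.arange(10)))  # processed in chunks of 3, 3, 3, 1, then concatenated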
mehdidc / gruln.py (created September 24, 2016 12:21, forked from udibr/gruln.py):
Keras GRU with Layer Normalization
import numpy as np
from keras import initializations, backend as K
from keras.layers import GRU
from collections import OrderedDict

class GRULN(GRU):
    '''Gated Recurrent Unit with Layer Normalization.

    The current implementation only works with consume_less='gpu',
    which is already set.

    # Arguments
        output_dim: dimension of the internal projections and the final output
    '''
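Since GRULN keeps the GRU constructor, usage would mirror a Keras 1.x GRU layer; a hypothetical sketch with placeholder shapes:

from keras.models import Sequential

model = Sequential()
model.add(GRULN(64, input_shape=(10, 32)))  # 10 timesteps, 32 features per step
model.compile(optimizer='adam', loss='mse')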
import numpy as np

class EnsembleRegressor(object):
    def __init__(self, regs=None):
        self.regs = regs

    def fit(self, X, y):
        return self  # no-op: the base regressors are assumed fitted already

    def predict(self, X, return_std=False):
        if return_std:
            means, stds = [], []
            for reg in self.regs:  # each reg must support return_std (e.g. GPs)
                mean, std = reg.predict(X, return_std=True)
                means.append(mean)
                stds.append(std)
            return np.mean(means, axis=0), np.mean(stds, axis=0)  # assumed aggregation
        return np.mean([reg.predict(X) for reg in self.regs], axis=0)
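A hypothetical usage with scikit-learn Gaussian processes, which support return_std=True; the data and alpha values are placeholders:

from sklearn.gaussian_process import GaussianProcessRegressor

X = np.random.rand(50, 2)
y = X.sum(axis=1)
regs = [GaussianProcessRegressor(alpha=a).fit(X, y) for a in (1e-10, 1e-3, 1e-1)]
ens = EnsembleRegressor(regs=regs)
mean, std = ens.predict(X[:5], return_std=True)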