from nolearn.lasagne import NeuralNet, BatchIterator
from lasagne import layers, nonlinearities, updates, init, objectives
from nolearn.lasagne.base import objective
from lasagne.objectives import aggregate
from lasagne.regularization import regularize_layer_params, l2, l1
import numpy as np


def objective_with_regularization(layers,
                                  loss_function,
                                  target,
                                  aggregate=aggregate,
                                  deterministic=False,
                                  get_output_kw=None):
    # Custom objective: the default nolearn loss plus L1/L2 penalties on
    # the weights of the hidden layers.
    lambda_l1 = 0  # L1 coefficient of 0 disables the L1 term
    lambda_l2 = 0.00001
    regularized_layer_names = ["hidden1", "hidden2", "hidden3"]
    regularized_layers = [layers[name] for name in regularized_layer_names]
    reg_l2 = regularize_layer_params(regularized_layers, l2)
    reg_l1 = regularize_layer_params(regularized_layers, l1)
    loss = objective(layers, loss_function, target, aggregate,
                     deterministic, get_output_kw)
    if deterministic:
        # Evaluation (validation) pass: report the plain loss so it stays
        # comparable across models.
        return loss
    else:
        # Training pass: add the penalties so they influence the gradients.
        return loss + lambda_l1 * reg_l1 + lambda_l2 * reg_l2


net = NeuralNet(
    # Architecture: three fully-connected hidden layers followed by a
    # softmax output layer.
    layers=[
        ('input', layers.InputLayer),
        ('hidden1', layers.DenseLayer),
        ('hidden2', layers.DenseLayer),
        ('hidden3', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    # Layer parameters:
    input_shape=(None, 100),  # 100 input features, variable batch size
    hidden1_num_units=500,  # number of units in the 1st hidden layer
    hidden1_nonlinearity=nonlinearities.rectify,
    hidden1_W=init.GlorotUniform(gain='relu'),
    hidden2_num_units=500,  # number of units in the 2nd hidden layer
    hidden2_nonlinearity=nonlinearities.rectify,
    hidden2_W=init.GlorotUniform(gain='relu'),
    hidden3_num_units=500,  # number of units in the 3rd hidden layer
    hidden3_nonlinearity=nonlinearities.rectify,
    hidden3_W=init.GlorotUniform(gain='relu'),
    output_num_units=18,  # 18 classes
    output_W=init.GlorotUniform(),
    output_nonlinearity=nonlinearities.softmax,
    # Objective function (with L1/L2 regularization, defined above):
    objective=objective_with_regularization,
    # Optimization method:
    update=updates.adadelta,  # the optimization algorithm is Adadelta
    update_learning_rate=0.1,
    batch_iterator_train=BatchIterator(batch_size=100),  # mini-batch size
    use_label_encoder=True,  # converts labels of any kind to integers
    max_epochs=100,  # train for this many epochs
    verbose=1,  # to monitor training at each epoch
)
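
# Usage sketch (an assumption, not part of the original gist): `load_data`
# is a hypothetical loader; X must have shape (n_samples, 100) to match
# input_shape, and y must hold the 18 class labels (labels of any hashable
# type work, since use_label_encoder=True maps them to integers).
# X, y = load_data()
# net.fit(X, y)
# y_pred = net.predict(X)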