Iaroslav Shcherbatyi (iaroslav-ai)
import os
from neon.util.argparser import NeonArgparser
from neon.layers import Conv, Pooling, MergeBroadcast, BranchNode, Affine, Tree, Dropout
from neon.layers import GeneralizedCost, Multicost
from neon.initializers import Constant, Xavier
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ArrayIterator
@iaroslav-ai
iaroslav-ai / README.md
Last active June 7, 2016 20:38
Cartpole RL experiment, inspired by the Nature paper on Atari game playing. Requires chainer.

A slightly modified deep Q-learning approach from this paper is used. Requires chainer. To reproduce, run the code below with Python 2.7; it will run training and monitoring of the environment. Training data and some videos will be saved in a "cartpole" folder next to the script file.

A slightly modified deep Q-learning approach from this paper is used. Requires chainer.

To reproduce, run the code below with Python 2.7; it will run training and monitoring of the environment. Training data and some videos will be saved in a "pendulum" folder next to the script file.

The continuous action space is discretized into 11 different actions.

There appear to be some convergence problems; a better choice of parameters might lead to a better objective value.
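Below is a minimal sketch of the action-discretization step described above, assuming the OpenAI Gym Pendulum environment; the Q-network and training loop from the gist are not reproduced here.

import numpy as np
import gym

# Pendulum has a 1-D continuous torque action; discretizing it into 11 levels
# lets a standard deep Q-learning head (one output unit per action) be used.
env = gym.make('Pendulum-v0')
n_actions = 11
actions = np.linspace(env.action_space.low[0],
                      env.action_space.high[0],
                      n_actions)

def select_action(q_values, epsilon=0.1):
    # epsilon-greedy selection over the discretized action set
    if np.random.rand() < epsilon:
        idx = np.random.randint(n_actions)
    else:
        idx = int(np.argmax(q_values))
    # the environment still expects a continuous action vector
    return np.array([actions[idx]]), idx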

@iaroslav-ai
iaroslav-ai / brain.py
Created June 19, 2016 14:47
Estimates the time needed to compute a forward pass of a neural net the size of the human brain (10^15 synapses). Assumes that a synapse implements a function that can be well approximated by a multiplication.
import numpy as np
import theano
from theano import tensor as T
# synapses to compute at once
N = 2 ** 13
M = N
Sym_rep = 16
x = np.random.randn(N).astype('float32')
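The preview above cuts off before the timing loop; the sketch below shows one way such an estimate can be extrapolated, using plain NumPy instead of Theano. The 10^15 synapse count comes from the gist description; the benchmark size and the linear-scaling assumption are illustrative.

import time
import numpy as np

# benchmark a dense block of N x M "synapses", each doing one multiply-add
N = 2 ** 13
M = N
W = np.random.randn(N, M).astype('float32')
x = np.random.randn(M).astype('float32')

reps = 16
start = time.time()
for _ in range(reps):
    y = W.dot(x)
elapsed = (time.time() - start) / reps

synapses_per_pass = N * M
brain_synapses = 10 ** 15
# assume runtime scales linearly with the number of synapses (multiplications)
estimate = elapsed * brain_synapses / synapses_per_pass
print("estimated forward pass time: %.1f s" % estimate)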
@iaroslav-ai
iaroslav-ai / main.py
Created March 6, 2017 07:37
Knowledge transfer for sequential model based optimization
from scipy.optimize import minimize, basinhopping, differential_evolution
# use autograd's numpy wrapper so gradients of model code can be taken
from autograd import numpy as np, grad
class SkoptProxy():
def __init__(self, ModelClass, bounds):
self.ModelClass = ModelClass
self.X = []
self.Y = []
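The preview stops before the proxy is used; as a hedged illustration of the general idea only (not the exact scheme in this gist), a surrogate-based optimizer can be warm-started with observations from a related task before the usual ask/tell loop on the new task. The previous-task data and the objective below are made up for the example.

import numpy as np
from skopt import Optimizer

# hypothetical observations from a related, previously solved task
X_prev = [[0.1], [0.5], [0.9]]
y_prev = [0.80, 0.20, 0.60]

opt = Optimizer(dimensions=[(0.0, 1.0)], base_estimator="GP")

# seed the surrogate with the related-task data
opt.tell(X_prev, y_prev)

def objective(x):
    return (x[0] - 0.3) ** 2

# standard sequential model-based optimization loop on the new task
for _ in range(10):
    x = opt.ask()
    opt.tell(x, objective(x))

print(min(opt.yi))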
from scipy.optimize import minimize, basinhopping, differential_evolution
import numpy as np
from copy import deepcopy
from skopt import Optimizer
from skopt.learning import GaussianProcessRegressor, RandomForestRegressor, ExtraTreesRegressor
from skopt.learning.gaussian_process.kernels import Matern
from skopt import space
from sklearn.svm import SVR
class MultiTaskOptProb():
"""
Comparison of parallel vs sequential optimization,
where the constant lie approach is used.
"""
from threading import Thread
from copy import deepcopy
from skopt import Optimizer
from skopt.learning import ExtraTreesRegressor, GaussianProcessRegressor
from skopt.space import Real
import numpy as np
from skopt import gp_minimize
from skopt.space import Real
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
import numpy as np
np.random.seed(2)
# a dummy objective with 6 dimensions
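The preview ends at the dummy objective; a self-contained sketch of how such a run and the imported plotting helpers typically fit together is given below (the 6-dimensional objective is made up for illustration).

from skopt import gp_minimize
from skopt.space import Real
from skopt.plots import plot_convergence, plot_objective, plot_evaluations
import numpy as np

np.random.seed(2)

# a dummy objective with 6 dimensions: a shifted quadratic bowl
def objective(x):
    return sum((xi - 0.5) ** 2 for xi in x)

space = [Real(-1.0, 1.0, name='x%d' % i) for i in range(6)]

res = gp_minimize(objective, space, n_calls=40, random_state=2)

print("best value: %.4f at %s" % (res.fun, res.x))
plot_convergence(res)
plot_evaluations(res)
plot_objective(res)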