from xgboost import XGBRegressor

class XGBQuantile(XGBRegressor):
    def __init__(self, quant_alpha=0.95, quant_delta=1.0, quant_thres=1.0, quant_var=1.0,
                 base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bytree=1,
                 gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=3, min_child_weight=1,
                 missing=None, n_estimators=100, n_jobs=1, nthread=None, objective='reg:linear',
                 random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
                 silent=True, subsample=1):
        # quantile-loss hyperparameters (quant_alpha = target quantile)
        self.quant_alpha = quant_alpha
        self.quant_delta = quant_delta
        self.quant_thres = quant_thres
        self.quant_var = quant_var
        super().__init__(base_score=base_score, booster=booster, colsample_bylevel=colsample_bylevel,
                         colsample_bytree=colsample_bytree, gamma=gamma, learning_rate=learning_rate,
                         max_delta_step=max_delta_step, max_depth=max_depth, min_child_weight=min_child_weight,
                         missing=missing, n_estimators=n_estimators, n_jobs=n_jobs, nthread=nthread,
                         objective=objective, random_state=random_state, reg_alpha=reg_alpha,
                         reg_lambda=reg_lambda, scale_pos_weight=scale_pos_weight, seed=seed,
                         silent=silent, subsample=subsample)
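The four quant_* attributes parameterize a (presumably smoothed) quantile loss; for reference, here is a minimal sketch of the plain pinball loss in xgboost's custom-objective form — an illustration, not this gist's implementation:

import numpy as np

def quantile_objective(alpha):
    # xgboost custom objective: return (gradient, hessian) of the pinball loss
    def obj(y_pred, dtrain):
        err = dtrain.get_label() - y_pred
        grad = np.where(err > 0, -alpha, 1.0 - alpha)  # d(loss)/d(y_pred)
        hess = np.ones_like(y_pred)  # pinball loss has zero curvature; 1.0 is a common stand-in
        return grad, hess
    return obj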
import numpy as np

np.random.seed(1)

def f(x):
    """The function to predict."""
    return x * np.sin(x)

# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
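A hypothetical usage sketch, assuming XGBQuantile above wires quant_alpha into such a quantile objective internally:

y = f(X).ravel()  # noiseless targets
# 90% prediction interval from two one-sided quantile models
upper = XGBQuantile(quant_alpha=0.95).fit(X, y).predict(X)
lower = XGBQuantile(quant_alpha=0.05).fit(X, y).predict(X)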
benoitdescamps / blog-metalearning-reptile.py
Last active September 12, 2020 18:21
PyTorch implementation of Reptile, after Ravi et al. (https://openreview.net/pdf?id=rJY0-Kcll)
import torch
from typing import Callable, List

class Reptile:
    """
    Reptile optimization as described by Ravi et al. (https://openreview.net/pdf?id=rJY0-Kcll)
    """
    def __init__(self,
                 model: torch.nn.Module,
                 metalearners: List['MetaLearner']):  # forward reference; MetaLearner is defined below
        self.n_tasks = len(metalearners)
        self.model = model
        self.metalearners = metalearners
class MetaLearner:
    """
    This is nothing more than a regular learning flow. However, we create this
    class because we plan on using a separate (meta-)learner for each task.
    """
    def __init__(self,
                 model: torch.nn.Module,
                 loss_fn: Callable,
                 optimizer):
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
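A sketch of one Reptile meta-update using these two classes (task_batches, k_inner and eps are hypothetical names; it assumes each MetaLearner's optimizer was built over reptile.model.parameters()):

import copy

def reptile_outer_step(reptile: 'Reptile', task_batches, k_inner: int = 5, eps: float = 0.1):
    theta = copy.deepcopy(reptile.model.state_dict())
    adapted = []
    for learner, (x, y) in zip(reptile.metalearners, task_batches):
        reptile.model.load_state_dict(theta)  # start each task from the meta-weights
        for _ in range(k_inner):              # inner-loop adaptation on the task
            learner.optimizer.zero_grad()
            learner.loss_fn(reptile.model(x), y).backward()
            learner.optimizer.step()
        adapted.append(copy.deepcopy(reptile.model.state_dict()))
    # outer update: theta <- theta + eps * mean_task(theta_task - theta)
    with torch.no_grad():
        for name in theta:
            delta = sum(s[name] - theta[name] for s in adapted) / len(adapted)
            theta[name] = theta[name] + eps * delta
    reptile.model.load_state_dict(theta)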
from tensorflow.python.training import optimizer

class PowerSign(optimizer.Optimizer):
    """Implementation of PowerSign.
    See [Bello et al., 2017](https://arxiv.org/abs/1709.07417)
    @@__init__
    """
    def __init__(self, learning_rate=0.001, alpha=0.01, beta=0.5, use_locking=False, name="PowerSign"):
        super(PowerSign, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._alpha = alpha
        self._beta = beta
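For intuition, a NumPy sketch of the update rule the optimizer applies (per Bello et al., 2017; w, grad, m are arrays and m is a running average of gradients — a sketch, not this class's _apply_dense):

def powersign_update(w, grad, m, lr=0.001, alpha=np.e, beta=0.9):
    m = beta * m + (1.0 - beta) * grad
    # scale the step by alpha^(sign(g) * sign(m)): larger when the gradient
    # agrees in sign with its running average, smaller when it disagrees
    step = lr * grad * alpha ** (np.sign(grad) * np.sign(m))
    return w - step, m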
benoitdescamps / randomSearch_RandomGridBuilder
Created May 11, 2018 21:44
Code snippet for Hyperparameters (part II): Random Search on Spark
import scala.collection.mutable
import breeze.stats.distributions.Rand
import org.apache.spark.ml.param.Param

class RandomGridBuilder(n: Int) {
  private val paramDistr = mutable.Map.empty[Param[_], Any]

  def addDistr[T](param: Param[T], distr: Any): this.type = distr match {
    case _: Rand[_]  => { paramDistr.put(param, distr); this }
    case _: Array[_] => { paramDistr.put(param, distr); this }
    case _ => throw new NotImplementedError("Distribution should be of type breeze.stats.distributions.Rand or an Array")
  }
}
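The same idea in Python terms, for readers following the blog in Python (random_grid and its arguments are hypothetical names mirroring addDistr):

import numpy as np

rng = np.random.RandomState(42)

def random_grid(n, distributions):
    # distributions maps a parameter name to either a sampler (callable)
    # or an array of candidate values; draw n independent settings
    grid = []
    for _ in range(n):
        grid.append({name: (d() if callable(d) else d[rng.randint(len(d))])
                     for name, d in distributions.items()})
    return grid

random_grid(3, {'maxDepth': lambda: rng.randint(2, 11),
                'regParam': np.array([0.01, 0.1, 1.0])})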
import tensorflow as tf

def RosenbrockOpt(optimizer, MAX_EPOCHS=4000, MAX_STEP=100):
    '''
    Returns the distance of each step*MAX_STEP w.r.t. the minimum (1, 1).
    '''
    x1_data = tf.Variable(initial_value=tf.random_uniform([1], minval=-3, maxval=3, seed=0), name='x1')
    x2_data = tf.Variable(initial_value=tf.random_uniform([1], minval=-3, maxval=3, seed=1), name='x2')
    # Rosenbrock function: y = (1 - x1)^2 + 100 * (x2 - x1^2)^2
    y = tf.add(tf.pow(tf.subtract(1.0, x1_data), 2.0),
               tf.multiply(100.0, tf.pow(tf.subtract(x2_data, tf.pow(x1_data, 2.0)), 2.0)), 'y')
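The function body is cut off above; a hypothetical continuation of RosenbrockOpt (TF1-style session loop; np is NumPy):

    train_op = optimizer.minimize(y)
    distances = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(MAX_EPOCHS):
            sess.run(train_op)
            if epoch % MAX_STEP == 0:
                x1, x2 = sess.run([x1_data, x2_data])
                distances.append(np.hypot(x1[0] - 1.0, x2[0] - 1.0))
    return distances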
with tf.train.MonitoredTrainingSession(master=server.target,
                                       is_chief=is_chiefing,
                                       checkpoint_dir=args['save_dir'],
                                       hooks=hooks,
                                       save_checkpoint_secs=600) as mon_sess:
    tf_feed = ctx.get_data_feed(train_mode=True)
    step = 0
    while not mon_sess.should_stop() and not tf_feed.should_stop() and step < args['steps']:
        batch_data, batch_labels = get_next_batch(tf_feed.next_batch(args['batch_size']))
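The loop body is truncated here; a hypothetical continuation (train_op, global_step, model_input and model_labels are assumed from the model-building snippet below):

        _, gs = mon_sess.run([train_op, global_step],
                             feed_dict={model_input: batch_data,
                                        model_labels: batch_labels})
        step += 1
    tf_feed.terminate()  # release the Spark data feed once the loop exits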
with tf.device(tf.train.replica_device_setter(
        worker_device="/job:worker/task:%d" % task_index,
        cluster=cluster)):

    def build_model():
        model_input = tf.placeholder(tf.float32, [None, args['num_features']])
        model_labels = tf.placeholder(tf.float32, [None, args['num_classes']])
        logits = tf.keras.layers.Dense(args['num_classes'])(model_input)
        model_output = tf.nn.softmax(logits)
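build_model stops at the softmax; a minimal sketch of the missing loss and training op continuing the function body (names and the Adam choice are assumptions):

        loss = tf.losses.softmax_cross_entropy(onehot_labels=model_labels, logits=logits)
        global_step = tf.train.get_or_create_global_step()
        train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss, global_step=global_step)
        return model_input, model_labels, model_output, train_op, global_step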
if mode == "train":
    cluster.train(dataRDD, epochs)
else:
    labelRDD = cluster.inference(dataRDD)
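For context, a sketch of how such a cluster object is typically obtained with TensorFlowOnSpark (map_fun, num_executors and num_ps are assumed to be defined elsewhere):

from tensorflowonspark import TFCluster

cluster = TFCluster.run(sc, map_fun, args, num_executors, num_ps,
                        tensorboard=False, input_mode=TFCluster.InputMode.SPARK)
# ... cluster.train(...) / cluster.inference(...) as above ...
cluster.shutdown()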