I hereby claim:
- I am koeusiss on github.
- I am koeusiss (https://keybase.io/koeusiss) on keybase.
- I have a public key ASCSJc_YSGbCLa4WegQu2KJ3w8XfZ4-q4xvg3RM182dQpQo
To claim this, I am signing this object:
| Model | RMSPE |
| --- | --- |
| LinearRegression | 0.30116 |
| ElasticNet | 0.31334 |
| DecisionTreeRegressor | 0.29511 |
| MLPRegressor | 0.54968 |
| LGBMRegressor | 0.29734 |
| BaggingRegressor | 0.29085 |
| RandomForestRegressor | 0.29252 |
| ExtraTreesRegressor | 0.29104 |
| Super Learner | 0.26938 |
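The Super Learner row is a stacked ensemble that combines cross-validated predictions from the base models, which is why it beats every individual learner. A minimal sketch of the same idea with scikit-learn's StackingRegressor follows (a subset of the models above; the random X, y data and the rmspe helper are stand-ins for illustration, not the original pipeline):

import numpy as np
from sklearn.ensemble import (BaggingRegressor, ExtraTreesRegressor,
                              RandomForestRegressor, StackingRegressor)
from sklearn.linear_model import ElasticNet, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor


def rmspe(y_true, y_pred):
    """Root mean squared percentage error, the metric reported in the table."""
    return np.sqrt(np.mean(((y_true - y_pred) / y_true) ** 2))


# Placeholder data; the target is shifted away from zero so RMSPE is defined.
X, y = np.random.rand(500, 10), np.random.rand(500) + 1.0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

base_learners = [
    ("lr", LinearRegression()),
    ("enet", ElasticNet()),
    ("tree", DecisionTreeRegressor()),
    ("bag", BaggingRegressor()),
    ("rf", RandomForestRegressor()),
    ("xt", ExtraTreesRegressor()),
]
# The meta-learner is fit on the base models' cross-validated predictions.
stack = StackingRegressor(estimators=base_learners,
                          final_estimator=LinearRegression(), cv=5)
stack.fit(X_train, y_train)
print("Stacked RMSPE:", rmspe(y_test, stack.predict(X_test)))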
## All prediction algorithm wrappers in SuperLearner:
## -----------------------------------------------------------------------
##  [1] "SL.bartMachine"     "SL.bayesglm"        "SL.biglasso"
##  [4] "SL.caret"           "SL.caret.rpart"     "SL.cforest"
##  [7] "SL.earth"           "SL.extraTrees"      "SL.gam"
## [10] "SL.gbm"             "SL.glm"             "SL.glm.interaction"
## [13] "SL.glmnet"          "SL.ipredbagg"       "SL.kernelKnn"
## [16] "SL.knn"             "SL.ksvm"            "SL.lda"
## [19] "SL.leekasso"        "SL.lm"              "SL.loess"
## [22] "SL.logreg"          "SL.mean"            "SL.nnet"
import numpy as np


def conv_backward(dZ, A_prev, W, b, padding="same", stride=(1, 1)):
    """Performs back-propagation over a convolutional layer of a neural network."""
    m, h_new, w_new, c_new = dZ.shape
    _, h_prev, w_prev, c_prev = A_prev.shape
    kh, kw, _, _ = W.shape
    sh, sw = stride
    # "same" padding: pad so the strided window grid of dZ covers A_prev
    ph = int(np.ceil(((h_new - 1) * sh + kh - h_prev) / 2)) if padding == "same" else 0
    pw = int(np.ceil(((w_new - 1) * sw + kw - w_prev) / 2)) if padding == "same" else 0
    A_pad = np.pad(A_prev, ((0, 0), (ph, ph), (pw, pw), (0, 0)))
    dA_pad, dW = np.zeros(A_pad.shape), np.zeros(W.shape)
    db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)
    for i, h, w, c in np.ndindex(m, h_new, w_new, c_new):
        # scatter each output gradient back through its kernel window
        dA_pad[i, h * sh:h * sh + kh, w * sw:w * sw + kw, :] += W[..., c] * dZ[i, h, w, c]
        dW[..., c] += A_pad[i, h * sh:h * sh + kh, w * sw:w * sw + kw, :] * dZ[i, h, w, c]
    return dA_pad[:, ph:ph + h_prev, pw:pw + w_prev, :], dW, db
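A quick shape check, with randomly generated tensors standing in for real activations (the sizes here are arbitrary assumptions):

# 2 images, 8x8 input, 3x3 kernels, 4 input / 5 output channels
dZ = np.random.randn(2, 8, 8, 5)
A_prev = np.random.randn(2, 8, 8, 4)
W = np.random.randn(3, 3, 4, 5)
b = np.random.randn(1, 1, 1, 5)
dA_prev, dW, db = conv_backward(dZ, A_prev, W, b)
print(dA_prev.shape, dW.shape, db.shape)  # (2, 8, 8, 4) (3, 3, 4, 5) (1, 1, 1, 5)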
# importing
import tensorflow as tf

# alpha (float): the original learning rate
# decay_rate (float): the weight that determines the rate at which alpha decays
# global_step (int): the number of passes of gradient descent that have elapsed
# decay_step (int): the number of passes of gradient descent that must occur before alpha is decayed further
# staircase=True makes the decay happen in discrete steps rather than continuously
decayed_alpha = tf.train.inverse_time_decay(
    alpha, global_step, decay_step, decay_rate, staircase=True
)
# importing
import tensorflow as tf

# alpha: the learning rate
# beta1: the weight for the first-moment (mean) estimate of the gradients
# beta2: the weight for the second-moment (uncentered variance) estimate
# epsilon: a small constant that avoids division by zero in the update
adam = tf.train.AdamOptimizer(alpha, beta1, beta2, epsilon)
train_op = adam.minimize(loss)
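For intuition about what beta1 and beta2 do, here is the bias-corrected update Adam applies, sketched by hand in NumPy (a single parameter; not TensorFlow's internal implementation):

import numpy as np

def adam_step(w, grad, v, s, t, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    """One Adam update; t is the 1-based step count."""
    v = beta1 * v + (1 - beta1) * grad          # first moment: moving average of the gradient
    s = beta2 * s + (1 - beta2) * grad ** 2     # second moment: moving average of its square
    v_hat = v / (1 - beta1 ** t)                # bias correction for the early steps
    s_hat = s / (1 - beta2 ** t)
    w = w - alpha * v_hat / (np.sqrt(s_hat) + epsilon)
    return w, v, s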
# importing
import tensorflow as tf

# alpha: the learning rate
# beta: the RMSProp decay weight (the moving-average weight for the squared gradients)
# epsilon: a small constant that avoids division by zero
rms_prop = tf.train.RMSPropOptimizer(alpha, beta, epsilon=epsilon)
train_op = rms_prop.minimize(loss)
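The corresponding hand-rolled update, for comparison with the Adam sketch above (again plain NumPy, not TensorFlow's internals):

import numpy as np

def rmsprop_step(w, grad, s, alpha=0.001, beta=0.9, epsilon=1e-8):
    """One RMSProp update: scale the step by a moving average of squared gradients."""
    s = beta * s + (1 - beta) * grad ** 2
    w = w - alpha * grad / (np.sqrt(s) + epsilon)
    return w, s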
# Import
import numpy as np
import matplotlib.pyplot as plt

# Define the input x-axis values
x = np.arange(-1, 1, 0.01)
# Define the Heaviside function using the NumPy builtin np.heaviside(x, y), where
# x is the input (x axis) and y is the value of the function at x == 0.
y = np.heaviside(x, 0)
plt.plot(x, y)
plt.show()
>>> def demo(x):
...     print("x: {} -- id: {}".format(x, id(x)))
...     x = 98
...     print("x: {} -- id: {}".format(x, id(x)))
...
>>> demo(1)
x: 1 -- id: 93973708342016
x: 98 -- id: 93973708345120
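Because integers are immutable, the assignment inside demo only rebinds the local name x to a different object; the caller's binding is untouched, as a continuation of the session shows (id values are illustrative):

>>> x = 1
>>> demo(x)
x: 1 -- id: 93973708342016
x: 98 -- id: 93973708345120
>>> x
1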
>>> tpl = ([1, 2], 3)
>>> tpl
([1, 2], 3)
>>> id(tpl)
139678802945544
>>> tpl[0][0] = "a"
>>> tpl
(['a', 2], 3)
>>> id(tpl)
139678802945544