# Simulate two groups with a small true difference in means (0 vs. 0.1)
n.samples <- 100
series.A <- rnorm(n.samples, 0, 2)
series.B <- rnorm(n.samples, 0.1, 2)

# Stack both series with a group indicator and run a one-way ANOVA
bundle <- data.frame(y = c(series.A, series.B), x = c(rep(0, n.samples), rep(1, n.samples)))
summary(aov(y ~ factor(x), data = bundle))

# Bootstrap helper: total of a resample drawn with replacement
resampleTotal <- function(x) {
  sum(sample(x, length(x), replace = TRUE))
}
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Bayesian Logistic Regression on the Kaggle titanic dataset"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 2
}
if (!require(pacman)) { install.packages("pacman") }
pacman::p_load(data.table, zoo, dygraphs)

# Data source: http://www.stateair.net/web/historical/1/1.html
# Concatenate three years of hourly Beijing PM2.5 readings
quality <- rbind(
  fread("Beijing_2015_HourlyPM25_created20160201.csv", skip = 3),
  fread("Beijing_2016_HourlyPM25_created20170201.csv", skip = 3),
  fread("Beijing_2017_HourlyPM25_created20170705 (3).csv", skip = 3)
)
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1174
class LSTM(Recurrent):
  #...
  def get_constants(self, inputs, training=None):
    #...
    # Build a (batch_size, units) tensor of ones matching the hidden state
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, self.units))

    def dropped_inputs():  # pylint: disable=function-redefined
      return K.dropout(ones, self.recurrent_dropout)

    # One recurrent-dropout mask per gate (i, f, c, o), sampled once per
    # batch and reused at every timestep via the step function's constants
    rec_dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(4)
    ]
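
These constants back the layer's public `recurrent_dropout` (and `dropout`) arguments. A minimal usage sketch via the standalone Keras API, which mirrors the contrib copy excerpted here; the layer sizes and input shape are illustrative:

from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential([
    # dropout masks the inputs, recurrent_dropout masks the recurrent state;
    # each mask is sampled once per batch and held fixed across timesteps,
    # which is exactly what get_constants above sets up
    LSTM(32, input_shape=(20, 8), dropout=0.2, recurrent_dropout=0.2),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')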
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1197
class LSTM(Recurrent):
  #...
  def step(self, inputs, states):
    #...
    if self.implementation == 2:
      #...
    else:
      if self.implementation == 0:
        # Inputs were already projected ahead of time; slice out the four
        # gate pre-activations (i, f, c, o)
        x_i = inputs[:, :self.units]
        x_f = inputs[:, self.units: 2 * self.units]
        x_c = inputs[:, 2 * self.units: 3 * self.units]
        x_o = inputs[:, 3 * self.units:]
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1197
class LSTM(Recurrent):
  #...
  def step(self, inputs, states):
    if self.implementation == 2:
      # Fused implementation: one big matmul per weight matrix for all four
      # gates, with the timestep-constant dropout masks applied to the
      # inputs and to the previous hidden state h_tm1
      z = K.dot(inputs * dp_mask[0], self.kernel)
      z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
      if self.use_bias:
        z = K.bias_add(z, self.bias)
      #...
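
To make the fused path concrete, here is a small numpy sketch of the same computation at a single timestep (all shapes and names are illustrative; bias, activations, and dropout masks are omitted):

import numpy as np

batch, input_dim, units = 4, 8, 16
rng = np.random.default_rng(0)

x = rng.normal(size=(batch, input_dim))            # inputs at one timestep
h_tm1 = rng.normal(size=(batch, units))            # previous hidden state
kernel = rng.normal(size=(input_dim, 4 * units))   # all four gates fused
recurrent_kernel = rng.normal(size=(units, 4 * units))

# One matmul per weight matrix covers all gates, then slice out i, f, c, o
z = x @ kernel + h_tm1 @ recurrent_kernel
z_i, z_f, z_c, z_o = np.split(z, 4, axis=-1)
print(z_i.shape)  # (4, 16)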
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1163
class LSTM(Recurrent):
  #...
  def get_constants(self, inputs, training=None):
    #...
    # Same trick as the recurrent mask, but shaped (batch_size, input_dim)
    # so it can be applied to the layer inputs
    input_shape = K.int_shape(inputs)
    input_dim = input_shape[-1]
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, int(input_dim)))

    def dropped_inputs():
      return K.dropout(ones, self.dropout)

    dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(4)
    ]
# https://github.com/salesforce/awd-lstm-lm/blob/dfd3cb0235d2caf2847a4d53e1cbd495b781b5d2/locked_dropout.py#L5
class LockedDropout(nn.Module):
    # ...
    def forward(self, x, dropout=0.5):
        if not self.training or not dropout:
            return x
        # Sample one Bernoulli mask per (batch, feature) position, with a
        # singleton time dimension so the same mask is broadcast ("locked")
        # across every timestep; scale by 1/(1 - dropout) to keep expectations
        m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
        mask = Variable(m, requires_grad=False) / (1 - dropout)
        mask = mask.expand_as(x)
        return mask * x
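
A minimal usage sketch, assuming the Variable-era PyTorch API the snippet targets; the (seq_len, batch, features) sizes are made up:

import torch
from torch.autograd import Variable

lockdrop = LockedDropout()
lockdrop.train()  # the mask is only applied in training mode

x = Variable(torch.randn(35, 20, 400))  # (seq_len, batch, features)
y = lockdrop(x, dropout=0.4)

# The same mask is reused at every timestep, so the zeroed positions
# at t=0 and t=1 coincide
print((y[0] == 0).sum(), (y[1] == 0).sum())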
# https://github.com/salesforce/awd-lstm-lm/blob/dfd3cb0235d2caf2847a4d53e1cbd495b781b5d2/weight_drop.py#L5
class WeightDrop(torch.nn.Module):
    def __init__(self, module, weights, dropout=0, variational=False):
        # ...
        self._setup()

    # ...
    def _setup(self):
        # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
        if issubclass(type(self.module), torch.nn.RNNBase):
            self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
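
Typical usage wraps an RNN and names the weight tensors to drop, as the repo itself does with `weight_hh_l0`; a minimal sketch with illustrative sizes, again assuming the Variable-era API:

import torch
from torch.autograd import Variable

# DropConnect on the hidden-to-hidden weights: the mask is re-sampled on
# every forward pass, and the same dropped weights are used at all timesteps
lstm = torch.nn.LSTM(400, 1150)
wd_lstm = WeightDrop(lstm, ['weight_hh_l0'], dropout=0.5)

x = Variable(torch.randn(35, 20, 400))  # (seq_len, batch, input_size)
output, (h, c) = wd_lstm(x)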
# https://github.com/salesforce/awd-lstm-lm/blob/dfd3cb0235d2caf2847a4d53e1cbd495b781b5d2/embed_regularize.py#L6
def embedded_dropout(embed, words, dropout=0.1, scale=None):
    if dropout:
        # One Bernoulli draw per word type, broadcast across the embedding
        # dimension and rescaled: whole rows of the matrix are dropped
        mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
        mask = Variable(mask)
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
    # Embedding lookup with the masked weight matrix
    padding_idx = embed.padding_idx
    if padding_idx is None:
        padding_idx = -1
    X = embed._backend.Embedding.apply(words, masked_embed_weight, padding_idx,
        embed.max_norm, embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
    return X
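
A minimal usage sketch with made-up vocabulary and batch sizes, assuming the Variable-era API (`embed._backend` no longer exists in current PyTorch):

import torch
import torch.nn as nn
from torch.autograd import Variable

embed = nn.Embedding(10000, 400)  # vocab_size x embedding_dim
words = Variable(torch.LongTensor([[1, 42, 7], [3, 42, 99]]))

emb = embedded_dropout(embed, words, dropout=0.1)
print(emb.size())  # (2, 3, 400); both occurrences of word 42 share one mask

Note that the function never consults `embed.training`; in the repo the caller disables it at evaluation time by passing `dropout=self.dropoute if self.training else 0`.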