CeShine Lee ceshine

@ceshine
ceshine / lstm_recurrent_dropout.py
Created Sep 30, 2017
Key Code Blocks of Keras LSTM Dropout Implementation
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1174
class LSTM(Recurrent):
  # ...
  def get_constants(self, inputs, training=None):
    # ...
    # Build a (batch_size, units) tensor of ones to derive the dropout masks from.
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, self.units))

    def dropped_inputs():  # pylint: disable=function-redefined
      return K.dropout(ones, self.recurrent_dropout)
    # One mask per gate (i, f, c, o), sampled once and reused at every timestep.
    rec_dp_mask = [K.in_train_phase(dropped_inputs, ones, training=training)
                   for _ in range(4)]
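For reference, a minimal sketch of how these masks are switched on from user code, assuming the standard Keras 2 LSTM constructor that this file implements (the layer size, rates, and input shape are illustrative, not from the gist):

from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
# dropout masks the inputs x_t; recurrent_dropout masks the recurrent state
# h_{t-1}. Both masks are sampled once per batch and reused at every timestep.
model.add(LSTM(128, input_shape=(50, 32), dropout=0.25, recurrent_dropout=0.25))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')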
@ceshine
ceshine / lstm_step_implementation_01.py
Created Sep 30, 2017
Key Code Blocks of Keras LSTM Dropout Implementation
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1197
class LSTM(Recurrent):
  # ...
  def step(self, inputs, states):
    # ...
    if self.implementation == 2:
      # ...
    else:
      if self.implementation == 0:
        # In implementation 0 the input projections were precomputed before the
        # recurrent loop, so `inputs` already holds the four gate slices.
        x_i = inputs[:, :self.units]
@ceshine
ceshine / lstm_step_implementation_2.py
Created Sep 30, 2017
Key Code Blocks of Keras LSTM Dropout Implementation
# https://github.com/tensorflow/tensorflow/blob/v1.3.0/tensorflow/contrib/keras/python/keras/layers/recurrent.py#L1197
class LSTM(Recurrent):
  # ...
  def step(self, inputs, states):
    if self.implementation == 2:
      # Implementation 2 fuses the four gates into one large matrix multiply.
      # dp_mask drops the inputs and rec_dp_mask drops the previous hidden
      # state, using the masks built in get_constants above.
      z = K.dot(inputs * dp_mask[0], self.kernel)
      z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
      if self.use_bias:
        z = K.bias_add(z, self.bias)
      # ...
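A brief sketch of how the implementation mode would be chosen from user code, assuming the Keras 2.0-era LSTM constructor excerpted above (the layer size and rates are illustrative):

from keras.layers import LSTM

# implementation=0 precomputes the input projections before the recurrent loop,
# implementation=1 uses separate smaller multiplies per gate, and
# implementation=2 (shown above) batches all four gates into one large matmul.
layer = LSTM(128, dropout=0.25, recurrent_dropout=0.25, implementation=2)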
@ceshine
ceshine / weight_drop.py
Last active Sep 30, 2017
Key Code Blocks of Pytorch RNN Dropout Implementation
# https://github.com/salesforce/awd-lstm-lm/blob/dfd3cb0235d2caf2847a4d53e1cbd495b781b5d2/weight_drop.py#L5
class WeightDrop(torch.nn.Module):
    def __init__(self, module, weights, dropout=0, variational=False):
        # ...
        self._setup()

    # ...

    def _setup(self):
        # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
        if issubclass(type(self.module), torch.nn.RNNBase):
            self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
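For context, a minimal usage sketch of the wrapper, following the constructor signature above (the layer sizes and dropout rate are illustrative):

import torch
from torch.autograd import Variable

# Wrap an LSTM so DropConnect is applied to its hidden-to-hidden weight matrix
# ('weight_hh_l0') each time the module is called.
lstm = WeightDrop(torch.nn.LSTM(10, 10), ['weight_hh_l0'], dropout=0.5)
x = Variable(torch.randn(6, 2, 10))  # (seq_len, batch, input_size)
output, hidden = lstm(x)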
@ceshine
ceshine / embed_regularize.py
Last active Sep 30, 2017
Key Code Blocks of Pytorch RNN Dropout Implementation
# https://github.com/salesforce/awd-lstm-lm/blob/dfd3cb0235d2caf2847a4d53e1cbd495b781b5d2/embed_regularize.py#L6
def embedded_dropout(embed, words, dropout=0.1, scale=None):
    if dropout:
        # Sample a Bernoulli mask over *rows* of the embedding matrix, so every
        # occurrence of a dropped word is zeroed, then rescale by 1 / (1 - dropout).
        mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
        mask = Variable(mask)
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
    # ... (the linked file then performs the embedding lookup with masked_embed_weight)
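A minimal usage sketch under the same Variable-era PyTorch API; the vocabulary size, embedding size, and word ids below are made up for illustration:

import torch
import torch.nn as nn
from torch.autograd import Variable

embed = nn.Embedding(10000, 128)                             # 10k-word vocabulary
words = Variable(torch.LongTensor([[1, 2, 3], [4, 5, 6]]))   # (batch, seq_len) word ids
# Each word id has a `dropout` chance of having its entire embedding row zeroed
# for this forward pass, so all occurrences of that word are dropped together.
vectors = embedded_dropout(embed, words, dropout=0.1)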
ceshine / pytorch_example.py
class Model(nn.Module):
    def __init__(self, nb_words, hidden_size=128, embedding_size=128, n_layers=1,
                 wdrop=0.25, edrop=0.1, idrop=0.25, batch_first=True):
        super(Model, self).__init__()
        # Modified LockedDropout that supports a batch-first arrangement
        self.lockdrop = LockedDropout(batch_first=batch_first)
        self.idrop = idrop
        self.edrop = edrop
        self.n_layers = n_layers
        self.embedding = nn.Embedding(nb_words, embedding_size)
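The file is truncated here; below is a hedged sketch (not from the gist) of how a forward pass could combine the three dropout types, assuming the LockedDropout, embedded_dropout, and WeightDrop pieces shown earlier and a hypothetical self.rnn attribute holding a WeightDrop-wrapped LSTM:

    # Hypothetical continuation of class Model, for illustration only.
    def forward(self, x):
        # edrop: drop whole rows of the embedding matrix (whole words at once).
        emb = embedded_dropout(self.embedding, x,
                               dropout=self.edrop if self.training else 0)
        # idrop: locked (variational) dropout -- one mask reused at every timestep.
        emb = self.lockdrop(emb, self.idrop)
        # self.rnn is assumed to be a WeightDrop-wrapped LSTM, so wdrop
        # (DropConnect on the hidden-to-hidden weights) is applied inside the call.
        output, hidden = self.rnn(emb)
        return output, hidden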
ceshine / eda.R
checkpoint::checkpoint("2017-10-26")  # pin package versions to this snapshot date
pacman::p_load(data.table)
pacman::p_load(caret)
pacman::p_load(ggplot2)
set.seed(998)
mushrooms <- fread("mushrooms.csv", stringsAsFactors=T)
mushrooms[, .N, by=class]               # count rows per class
mushrooms[, eval("veil-type") := NULL]  # drop the veil-type column
@ceshine
ceshine / create_folder.py
Created Nov 2, 2017
Create a folder with pathlib if it does not exist
import pathlib

output_folder = "tmp/folder"
# Creates intermediate directories as needed and does not raise if the folder already exists.
pathlib.Path(output_folder).mkdir(parents=True, exist_ok=True)
@ceshine
ceshine / birnn.ipynb
Created Nov 12, 2017
Figuring Out How Bidirectional RNN Works in PyTorch
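The notebook itself cannot be rendered here; as a stand-in, a minimal sketch of the behavior it investigates, assuming a plain PyTorch GRU (sizes and sequence length are illustrative):

import torch
from torch.autograd import Variable

rnn = torch.nn.GRU(input_size=8, hidden_size=16, num_layers=1, bidirectional=True)
x = Variable(torch.randn(5, 2, 8))    # (seq_len, batch, input_size)
output, h_n = rnn(x)
# output: (seq_len, batch, 2 * hidden_size) -- forward and backward states are
# concatenated along the last dimension at every timestep.
# h_n: (num_layers * 2, batch, hidden_size) -- final hidden state per direction;
# the backward direction's "final" state corresponds to timestep 0 of the input.
print(output.size(), h_n.size())      # torch.Size([5, 2, 32]) torch.Size([2, 2, 16])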
@ceshine
ceshine / sgd.py
Created Dec 6, 2017
PyTorch SGD implementation
# http://pytorch.org/docs/master/_modules/torch/optim/sgd.html#SGD
class SGD(Optimizer):
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # ...

    def __setstate__(self, state):
        # ...
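For reference, standard usage of this optimizer (the model and hyperparameters below are illustrative):

import torch
from torch.autograd import Variable

model = torch.nn.Linear(10, 1)
# momentum keeps a velocity buffer per parameter; nesterov applies the momentum
# look-ahead variant; weight_decay adds L2 regularization to the gradient.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9,
                            weight_decay=1e-4, nesterov=True)

x = Variable(torch.randn(4, 10))
y = Variable(torch.randn(4, 1))
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()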