@ceshine · Last active September 30, 2017
import torch
import torch.nn as nn

# Assumes LockedDropout, WeightDrop and embedded_dropout (as in the
# salesforce/awd-lstm-lm codebase) are defined or imported elsewhere.


class Model(nn.Module):
    def __init__(self, nb_words, hidden_size=128, embedding_size=128, n_layers=1,
                 wdrop=0.25, edrop=0.1, idrop=0.25, batch_first=True):
        super(Model, self).__init__()
        # Modified LockedDropout that supports batch-first arrangement
        self.lockdrop = LockedDropout(batch_first=batch_first)
        self.idrop = idrop
        self.edrop = edrop
        self.n_layers = n_layers
        self.embedding = nn.Embedding(nb_words, embedding_size)
        self.rnns = [
            nn.LSTM(embedding_size if l == 0 else hidden_size,
                    hidden_size, num_layers=1, batch_first=batch_first)
            for l in range(n_layers)
        ]
        if wdrop:
            # DropConnect on the hidden-to-hidden weights of each LSTM layer
            self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop)
                         for rnn in self.rnns]
        self.rnns = torch.nn.ModuleList(self.rnns)
        self.output_layer = nn.Linear(hidden_size, 1)
        self.init_weights()

    def init_weights(self):
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.output_layer.bias.data.fill_(0)
        self.output_layer.weight.data.uniform_(-initrange, initrange)

    def forward(self, X):
        # Embedding dropout: zeroes entire rows of the embedding matrix while training
        emb = embedded_dropout(self.embedding, X,
                               dropout=self.edrop if self.training else 0)
        # Variational (locked) dropout on the embedded inputs
        raw_output = self.lockdrop(emb, self.idrop)
        new_hidden, new_cell_state = [], []
        for l, rnn in enumerate(self.rnns):
            raw_output, (new_h, new_c) = rnn(raw_output)
            # Locked dropout between layers (same mask reused across time steps)
            raw_output = self.lockdrop(raw_output, self.idrop)
            new_hidden.append(new_h)
            new_cell_state.append(new_c)
        hidden = torch.cat(new_hidden, 0)
        cell_state = torch.cat(new_cell_state, 0)
        final_output = self.output_layer(raw_output)
        return final_output, hidden, cell_state
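
The gist references LockedDropout without defining it. Below is a minimal sketch of the batch-first variant mentioned in the constructor comment, modeled on the salesforce/awd-lstm-lm implementation; this class is not part of the gist and its details are an assumption.

class LockedDropout(nn.Module):
    """Variational dropout: one mask per sequence, reused across all time steps."""
    def __init__(self, batch_first=True):
        super(LockedDropout, self).__init__()
        self.batch_first = batch_first

    def forward(self, x, dropout=0.5):
        if not self.training or not dropout:
            return x
        if self.batch_first:
            # x: (batch, seq_len, features); mask broadcasts over the time dimension
            m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - dropout)
        else:
            # x: (seq_len, batch, features)
            m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
        mask = m / (1 - dropout)
        return mask * x

A hypothetical usage example follows; the vocabulary size, batch shape and variable names are illustrative, not from the gist.

model = Model(nb_words=10000, hidden_size=128, embedding_size=128, n_layers=2)
model.train()

# Batch of 32 sequences, 50 tokens each (batch_first=True)
X = torch.randint(0, 10000, (32, 50), dtype=torch.long)

output, hidden, cell_state = model(X)
# output:     (32, 50, 1)  -- one scalar prediction per time step
# hidden:     (2, 32, 128) -- last hidden state from each LSTM layer
# cell_state: (2, 32, 128) -- last cell state from each LSTM layer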