@csarofeen · Created February 7, 2018
# Minimal script: run one forward/backward pass through a bidirectional,
# multi-layer cuDNN LSTM and dump the shape of every weight tensor.
import torch
import torch.nn as nn
from torch.autograd import Variable  # pre-0.4 API: Variable wraps a tensor for autograd

torch.backends.cudnn.enabled = True
# Configuration: bidirectional, 2-layer LSTM with bias disabled,
# sequence-first layout (batch_first=False), and no dropout
seq_size = 2
hidden_size = 3
inp_size = 4
batch_size = 5
num_layers = 2
bias = False
batch_first = False
dropout = 0.0
bidirectional = True
print("Seq_size", seq_size)
print("hidden_size", hidden_size)
print("inp_size", inp_size)
print("batch_size", batch_size)
print("num_layers", num_layers)
# Random input on the GPU, shaped (seq_len, batch, input_size) since batch_first=False
inp = torch.cuda.FloatTensor(seq_size, batch_size, inp_size).uniform_()
pytorch_lstm = nn.LSTM(inp_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional).cuda()

# One forward pass, then backprop a scalar so every weight receives a gradient
pyt_out, (pyt_hx, pyt_cx) = pytorch_lstm(Variable(inp))
pyt_out.sum().backward()
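# Added sanity checks (not in the original gist): a bidirectional LSTM
# concatenates both directions along the feature dim, and hx/cx stack
# num_layers * num_directions slices.
assert pyt_out.size() == (seq_size, batch_size, 2 * hidden_size)
assert pyt_hx.size() == (num_layers * 2, batch_size, hidden_size)
assert pyt_cx.size() == (num_layers * 2, batch_size, hidden_size)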
# _all_weights lists the flat parameter names, one sublist per layer/direction
# (no bias_* entries here since bias=False)
for layer in pytorch_lstm._all_weights:
    for weight in layer:
        print(weight, getattr(pytorch_lstm, weight).size())
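For reference, a hedged sketch of what the loop should print with these settings (assuming the stock PyTorch LSTM parameter layout: each weight matrix stacks the four gates, so its leading dimension is 4 * hidden_size, layers above the first consume the concatenated 2 * hidden_size bidirectional output, and reverse-direction weights carry a _reverse suffix):

weight_ih_l0 torch.Size([12, 4])
weight_hh_l0 torch.Size([12, 3])
weight_ih_l0_reverse torch.Size([12, 4])
weight_hh_l0_reverse torch.Size([12, 3])
weight_ih_l1 torch.Size([12, 6])
weight_hh_l1 torch.Size([12, 3])
weight_ih_l1_reverse torch.Size([12, 6])
weight_hh_l1_reverse torch.Size([12, 3])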