Navigation Menu

Skip to content

Instantly share code, notes, and snippets.

Created July 8, 2017 21:23
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save anonymous/9c0bd422a67212cdb5aebe27539d887a to your computer and use it in GitHub Desktop.
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-70-1639bf8d1c6f> in <module>()
----> 1 model.forward(x)
<ipython-input-66-0c01578a2402> in forward(self, x)
11 c0 = Variable(torch.zeros([1, batch_size, self.hidden_dim]), requires_grad=False)
12 print(h0, c0)
---> 13 fx, _ = self.lstm.forward(x, (h0, c0))
14 return self.linear.forward(fx[-1])
15
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
89 dropout_state=self.dropout_state
90 )
---> 91 output, hidden = func(input, self.all_weights, hx)
92 if is_packed:
93 output = PackedSequence(output, batch_sizes)
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, *fargs, **fkwargs)
341 else:
342 func = AutogradRNN(*args, **kwargs)
--> 343 return func(input, *fargs, **fkwargs)
344
345 return forward
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, weight, hidden)
241 input = input.transpose(0, 1)
242
--> 243 nexth, output = func(input, hidden, weight)
244
245 if batch_first and batch_sizes is None:
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight)
81 l = i * num_directions + j
82
---> 83 hy, output = inner(input, hidden[l], weight[l])
84 next_hidden.append(hy)
85 all_output.append(output)
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight)
110 steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
111 for i in steps:
--> 112 hidden = inner(input[i], hidden, *weight)
113 # hack to handle LSTM
114 output.append(isinstance(hidden, tuple) and hidden[0] or hidden)
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in LSTMCell(input, hidden, w_ih, w_hh, b_ih, b_hh)
28
29 hx, cx = hidden
---> 30 gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
31
32 ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/functional.py in linear(input, weight, bias)
447 def linear(input, weight, bias=None):
448 state = _functions.linear.Linear()
--> 449 return state(input, weight) if bias is None else state(input, weight, bias)
450
451
/home/monorhesus/.conda/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/linear.py in forward(self, input, weight, bias)
8 self.save_for_backward(input, weight, bias)
9 output = input.new(input.size(0), weight.size(0))
---> 10 output.addmm_(0, 1, input, weight.t())
11 if bias is not None:
12 # cuBLAS doesn't support 0 strides in sger, so we can't use expand
RuntimeError: size mismatch, m1: [20 x 1], m2: [5 x 512] at /py/conda-bld/pytorch_1493681908901/work/torch/lib/TH/generic/THTensorMath.c:1237
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment