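PyTorch nn.DataParallel + RNN traceback: Expected hidden size (1, 4, 64), got (1, 32, 64)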
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-1-4fd28661ea7d> in <module>()
     40
     41 for c in range(10):
---> 42     decoder_dist(input_[:,c].contiguous(), hidden) #RuntimeError: Expected hidden size (1, 4, 64), got (1, 32, 64)
     43

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py in forward(self, *inputs, **kwargs)
     58             return self.module(*inputs[0], **kwargs[0])
     59         replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
---> 60         outputs = self.parallel_apply(replicas, inputs, kwargs)
     61         return self.gather(outputs, self.output_device)
     62

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py in parallel_apply(self, replicas, inputs, kwargs)
     68
     69     def parallel_apply(self, replicas, inputs, kwargs):
---> 70         return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
     71
     72     def gather(self, outputs, output_device):

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py in parallel_apply(modules, inputs, kwargs_tup, devices)
     65         output = results[i]
     66         if isinstance(output, Exception):
---> 67             raise output
     68         outputs.append(output)
     69     return outputs

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py in _worker(i, module, input, kwargs, results, lock, device)
     40         try:
     41             with torch.cuda.device(device):
---> 42                 output = module(*input, **kwargs)
     43             with lock:
     44                 results[i] = output

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

<ipython-input-1-4fd28661ea7d> in forward(self, input, hidden)
     17         batch_size = input.size(0)
     18         encoded = self.encoder(input)
---> 19         output, hidden = self.rnn(encoded.view(1, batch_size, -1), hidden)
     20         output = self.decoder(output.view(batch_size, -1))
     21         return output, hidden

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    222         for hook in self._forward_pre_hooks.values():
    223             hook(self, input)
--> 224         result = self.forward(*input, **kwargs)
    225         for hook in self._forward_hooks.values():
    226             hook_result = hook(self, input, result)

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
    160             flat_weight=flat_weight
    161         )
--> 162         output, hidden = func(input, self.all_weights, hx)
    163         if is_packed:
    164             output = PackedSequence(output, batch_sizes)

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, *fargs, **fkwargs)
    349         else:
    350             func = AutogradRNN(*args, **kwargs)
--> 351         return func(input, *fargs, **fkwargs)
    352
    353     return forward

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/autograd/function.py in _do_forward(self, *input)
    282         self._nested_input = input
    283         flat_input = tuple(_iter_variables(input))
--> 284         flat_output = super(NestedIOFunction, self)._do_forward(*flat_input)
    285         nested_output = self._nested_output
    286         nested_variables = _unflatten(flat_output, self._nested_output)

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/autograd/function.py in forward(self, *args)
    304     def forward(self, *args):
    305         nested_tensors = _map_variable_tensor(self._nested_input)
--> 306         result = self.forward_extended(*nested_tensors)
    307         del self._nested_input
    308         self._nested_output = result

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward_extended(self, input, weight, hx)
    291         hy = tuple(h.new() for h in hx)
    292
--> 293         cudnn.rnn.forward(self, input, hx, weight, output, hy)
    294
    295         self.save_for_backward(input, hx, weight, output)

~/miniconda3/envs/torchenv/lib/python3.6/site-packages/torch/backends/cudnn/rnn.py in forward(fn, input, hx, weight, output, hy)
    264         if tuple(hx.size()) != hidden_size:
    265             raise RuntimeError('Expected hidden size {}, got {}'.format(
--> 266                 hidden_size, tuple(hx.size())))
    267         if cx is not None and tuple(cx.size()) != hidden_size:
    268             raise RuntimeError('Expected cell size {}, got {}'.format(

RuntimeError: Expected hidden size (1, 4, 64), got (1, 32, 64)
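
The mismatch comes from nn.DataParallel: it scatters every tensor argument along dim 0, so the input batch of 32 is split into per-replica slices of 4, while the hidden state, shaped (num_layers=1, batch=32, hidden=64), carries its batch on dim 1 and is not split with it. Each replica therefore receives a 4-example input together with the full 32-example state, and the cuDNN size check fails with "Expected hidden size (1, 4, 64), got (1, 32, 64)".

One common workaround is to carry the hidden state batch-first across the DataParallel boundary and transpose it inside forward(). Below is a minimal sketch of that fix, reconstructed from the frames above; the class name CharRNN, the GRU cell, n_tokens, and the tensor sizes are assumptions, and it targets the current PyTorch API rather than the 0.2-era one in this traceback:

import torch
import torch.nn as nn

class CharRNN(nn.Module):
    def __init__(self, n_tokens=100, hidden_size=64):
        super().__init__()
        self.encoder = nn.Embedding(n_tokens, hidden_size)
        self.rnn = nn.GRU(hidden_size, hidden_size, num_layers=1)
        self.decoder = nn.Linear(hidden_size, n_tokens)

    def forward(self, input, hidden):
        batch_size = input.size(0)
        encoded = self.encoder(input)
        # hidden arrives as (batch, num_layers, hidden) so DataParallel's
        # scatter splits it along dim 0 together with the input batch;
        # transpose to the (num_layers, batch, hidden) layout the GRU expects.
        hidden = hidden.transpose(0, 1).contiguous()
        output, hidden = self.rnn(encoded.view(1, batch_size, -1), hidden)
        output = self.decoder(output.view(batch_size, -1))
        # Hand the state back batch-first so gather() concatenates the
        # per-replica pieces along the batch dimension again.
        return output, hidden.transpose(0, 1)

decoder_dist = nn.DataParallel(CharRNN().cuda())
input_ = torch.randint(0, 100, (32, 10)).cuda()   # (batch, seq_len)
hidden = torch.zeros(32, 1, 64).cuda()            # batch-first, not (1, 32, 64)

for c in range(10):
    output, hidden = decoder_dist(input_[:, c].contiguous(), hidden)

With the state shaped (batch, num_layers, hidden), scatter() splits it along the batch together with the input and gather() reassembles the per-replica states afterwards, so the loop runs without tripping the hidden-size check.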