@jamesr66a
Created November 30, 2018 20:22
Traceback (most recent call last):
  File "test/test_jit.py", line 5888, in test_module_pack_unpack
    imported = self.getExportImportCopy(tm)
  File "test/test_jit.py", line 281, in getExportImportCopy
    torch.jit.save(imported, buffer)
  File "/Users/jamesreed/onnx-fairseq/pytorch/torch/jit/__init__.py", line 143, in save
    ret = m.save_to_buffer()
RuntimeError:
a leaf Variable that requires grad has been used in an in-place operation. (check_inplace at ../torch/csrc/autograd/VariableTypeUtils.h:49)
frame #0: c10::Error::Error(c10::SourceLocation, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&) + 64 (0x107d861f0 in libc10.dylib)
frame #1: torch::autograd::check_inplace(at::Tensor const&) + 231 (0x1172eab87 in libtorch.1.dylib)
frame #2: torch::autograd::VariableType::_th_set_(at::Tensor&, at::Tensor const&) const + 104 (0x1174c6d98 in libtorch.1.dylib)
frame #3: at::native::set_(at::Tensor&, at::Tensor const&) + 36 (0x1152396d4 in libcaffe2.dylib)
frame #4: at::TypeDefault::set_(at::Tensor&, at::Tensor const&) const + 15 (0x11557ef1f in libcaffe2.dylib)
frame #5: torch::autograd::VariableType::set_(at::Tensor&, at::Tensor const&) const + 427 (0x11755254b in libtorch.1.dylib)
frame #6: std::__1::__function::__func<torch::jit::(anonymous namespace)::$_487, std::__1::allocator<torch::jit::(anonymous namespace)::$_487>, int (std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&)>::operator()(std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&) + 93 (0x11787a76d in libtorch.1.dylib)
frame #7: torch::jit::InterpreterStateImpl::runImpl(std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&) + 455 (0x1179c9b17 in libtorch.1.dylib)
frame #8: torch::jit::InterpreterStateImpl::run(std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&) + 28 (0x1179c1c9c in libtorch.1.dylib)
frame #9: torch::jit::GraphExecutorImpl::run(std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&) + 4707 (0x117985ef3 in libtorch.1.dylib)
frame #10: torch::jit::script::Method::run(std::__1::vector<c10::IValue, std::__1::allocator<c10::IValue> >&) + 216 (0x114bfe168 in libtorch_python.dylib)
frame #11: torch::jit::script::Module::save(std::__1::basic_ostream<char, std::__1::char_traits<char> >&) + 137 (0x114c19069 in libtorch_python.dylib)
frame #12: void pybind11::cpp_function::initialize<torch::jit::script::initJitScriptBindings(_object*)::$_1, pybind11::bytes, std::__1::shared_ptr<torch::jit::script::Module>, pybind11::name, pybind11::is_method, pybind11::sibling>(torch::jit::script::initJitScriptBindings(_object*)::$_1&&, pybind11::bytes (*)(std::__1::shared_ptr<torch::jit::script::Module>), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::__invoke(pybind11::detail::function_call&) + 324 (0x114beffb4 in libtorch_python.dylib)
frame #13: pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 3482 (0x114832dda in libtorch_python.dylib)
<omitting python frames>
operation failed in interpreter:
op_version_set = 0
def forward(self,
    x: Tensor) -> Tensor:
  _0 = torch.add(x, self.my_derived_param, alpha=1)
  return _0
def pack(self) -> Tensor:
  _1 = torch.zeros(annotate(List[int], []), dtype=6, layout=0, device=[0, -1])
  _2 = torch.set_(self.my_derived_param, _1)
       ~~~~~~~~~~ <--- HERE
  _3 = torch.zeros([3, 4], dtype=6, layout=0, device=[0, -1])
  return _3
def unpack(self) -> Tensor:
  _4 = torch.set_(self.my_derived_param, self.my_new_param)
  _5 = torch.zeros([3, 4], dtype=6, layout=0, device=[0, -1])
  return _5
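
The root cause reproduces in plain eager PyTorch: my_derived_param is a leaf Variable with requires_grad=True, and torch.set_ is an in-place op, so autograd's check_inplace rejects it. A minimal sketch (hypothetical names, not the test's actual module):

    import torch

    w = torch.nn.Parameter(torch.zeros(3, 4))  # leaf Variable, requires_grad=True
    w.set_(torch.zeros(3, 4))                  # same in-place set_ as pack() above
    # RuntimeError: a leaf Variable that requires grad has been used
    # in an in-place operation.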
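A common workaround for this class of error is to perform the mutation with grad mode disabled, since check_inplace only fires while autograd is recording. A hedged eager-mode sketch (whether no_grad is usable inside a scripted pack() at this commit is not confirmed here):

    import torch

    w = torch.nn.Parameter(torch.zeros(3, 4))
    with torch.no_grad():
        # With grad mode off, the in-place set_ on a leaf parameter
        # is permitted.
        w.set_(torch.zeros(3, 4))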