@vanbasten23 · Created December 20, 2022 05:52
root@t1v-n-621e873b-w-0:/workspaces/work# python3 pytorch/xla/test/test_dynamic_shape_backward_models.py
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=1
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
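The xw32 lines above are ad-hoc debug prints instrumenting SymInt::operator== (c10), XLATensor's eq/bool_ and the getDynamicValue calls in torch_xla's dynamic_ir.cpp, and autograd's InputMetadata::is_same_shape. Read together, they show the leading dimension is a bounded dynamic dimension whose runtime value is 79 (against the <=80 bound), and that autograd compares the incoming gradient's SymInt sizes against the saved input metadata dimension by dimension; the single 79 vs. 1 comparison appears to be a size-1/broadcast check. A rough Python-level sketch of that shape check (a sketch of what the C++ is_same_shape does, not its actual code):

    # Sketch: autograd verifies a gradient's shape against the saved input
    # metadata before accumulating it. With dynamic shapes both sides hold
    # SymInts, so every == below is a SymInt comparison like the operator==
    # lines logged above, and comparing a dynamic dim consults its runtime
    # value (getDynamicValue() -> 79 in this run).
    def is_same_shape(grad_sym_sizes, saved_sym_sizes):
        if len(grad_sym_sizes) != len(saved_sym_sizes):
            return False
        return all(g == s for g, s in zip(grad_sym_sizes, saved_sym_sizes))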
Traceback (most recent call last):
  File "pytorch/xla/test/test_dynamic_shape_backward_models.py", line 82, in <module>
    train(model, loss_fn=criterion, optimizer=optimizer)
  File "pytorch/xla/test/test_dynamic_shape_backward_models.py", line 69, in train
    loss.backward()
  File "/home/ptxla/.local/lib/python3.8/site-packages/torch/_tensor.py", line 484, in backward
    torch.autograd.backward(
  File "/home/ptxla/.local/lib/python3.8/site-packages/torch/autograd/__init__.py", line 197, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: torch_xla/csrc/helpers.cpp:273 : Check failed: out_size <= size_at_dyndim / input_shape.dimensions( input_dynamic_dimension) (10 vs. 1)
*** Begin stack trace ***
  tsl::CurrentStackTrace[abi:cxx11]()
  torch_xla::XlaHelpers::GetDynamicReshapeInfo(xla::Shape const&, absl::lts_20220623::Span<long const>)
  torch_xla::XlaHelpers::GetDynamicReshape(xla::Shape const&, absl::lts_20220623::Span<long const>)
  torch_xla::Permute::MakePermuteShape(xla::Shape const&, absl::lts_20220623::Span<long const>)
  torch_xla::ViewInfo::ViewInfo(torch_xla::ViewInfo::Type, xla::Shape, std::vector<long, std::allocator<long> >)
  torch_xla::tensor_methods::transpose(c10::intrusive_ptr<torch_xla::XLATensor, c10::detail::intrusive_target_default_null_type<torch_xla::XLATensor> > const&, long, long)
  torch_xla::XLANativeFunctions::t(at::Tensor const&)
  at::_ops::t::redispatch(c10::DispatchKeySet, at::Tensor const&)
  at::_ops::t::redispatch(c10::DispatchKeySet, at::Tensor const&)
  at::_ops::t::call(at::Tensor const&)
  torch::autograd::generated::AddmmBackward0::apply(std::vector<at::Tensor, std::allocator<at::Tensor> >&&)
  torch::autograd::Engine::evaluate_function(std::shared_ptr<torch::autograd::GraphTask>&, torch::autograd::Node*, torch::autograd::InputBuffer&, std::shared_ptr<torch::autograd::ReadyQueue> const&)
  torch::autograd::Engine::thread_main(std::shared_ptr<torch::autograd::GraphTask> const&)
  torch::autograd::Engine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool)
  torch::autograd::python::PythonEngine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool)
  clone
*** End stack trace ***
Unable to map dynamic dimension of shape f32[<=80,10]{1,0} to output sizes (10, 80)
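What fails: during loss.backward(), AddmmBackward0 (the backward node of the linear layer's addmm) calls t() to transpose a tensor whose leading dimension is dynamic, f32[<=80,10]. torch_xla lowers the transpose through Permute::MakePermuteShape into XlaHelpers::GetDynamicReshape, and GetDynamicReshapeInfo's divisibility check (out_size <= size_at_dyndim / input_shape.dimensions(input_dynamic_dimension), 10 vs. 1 here) cannot follow the dynamic dimension through the permutation to output sizes (10, 80), so the mapping fails. Below is a hypothetical minimal repro of the kind of training step that triggers this. It is a sketch only: the actual test_dynamic_shape_backward_models.py is not included in this gist, and the XLA_EXPERIMENTAL flag plus the nonzero/expand construction are assumptions about how the test builds its dynamically shaped input.

    # Hypothetical sketch: a Linear model fed an input with a bounded-dynamic
    # batch dimension, f32[<=80, 10] with runtime size 79. The backward pass
    # reaches AddmmBackward0, which transposes the dynamically shaped tensor
    # and trips the GetDynamicReshapeInfo check shown in the stack trace.
    import os
    os.environ["XLA_EXPERIMENTAL"] = "nonzero:masked_select"  # assumed flag

    import torch
    import torch.nn as nn
    import torch_xla.core.xla_model as xm

    device = xm.xla_device()
    model = nn.Linear(10, 1).to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    def train(model, loss_fn, optimizer):
        mask = torch.ones(80, device=device)
        mask[0] = 0
        idx = torch.nonzero(mask)        # [<=80, 1]: bound 80, runtime size 79
        x = idx.float().expand(-1, 10)   # f32[<=80, 10]
        optimizer.zero_grad()
        loss = model(x).sum()  # stand-in for loss_fn; keeps the sketch minimal
        loss.backward()        # raises the RuntimeError captured above
        optimizer.step()

    train(model, loss_fn=criterion, optimizer=optimizer)

Note that the transpose comes from the gradient formula for addmm (the weight gradient needs the transposed input), so the forward pass alone runs fine; only backward hits the permute-as-reshape path.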