
@ailzhang
Created September 4, 2019 20:06
2019-09-04 20:04:55.629680: I tensorflow/compiler/xla/xla_client/xrt_computation_client.cc:196] XRT device (LOCAL) CPU:0 -> /job:localservice/replica:0/task:0/device:XLA_CPU:0
2019-09-04 20:04:55.629791: I tensorflow/compiler/xla/xla_client/xrt_computation_client.cc:200] Worker grpc://localhost:40934 for /job:localservice/replica:0/task:0
2019-09-04 20:04:55.629800: I tensorflow/compiler/xla/xla_client/xrt_computation_client.cc:204] XRT default device: CPU:0
2019-09-04 20:04:55.629828: I tensorflow/compiler/xla/xla_client/xrt_local_service.cc:40] Peer localservice 1 {localhost:40934}
2019-09-04 20:04:55.629959: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2019-09-04 20:04:55.655453: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2300000000 Hz
2019-09-04 20:04:55.661129: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x7f63e4000b50 executing computations on platform Host. Devices:
2019-09-04 20:04:55.661219: I tensorflow/compiler/xla/service/service.cc:175] StreamExecutor device (0): Host, Default Version
2019-09-04 20:04:55.664456: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:258] Initialize GrpcChannelCache for job localservice -> {0 -> localhost:40934}
2019-09-04 20:04:55.671675: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:365] Started server with target: grpc://localhost:40934
2019-09-04 20:04:55.733651: E tensorflow/compiler/xla/xla_client/tf_logging.cc:11] Check failed: status.status() == ::tensorflow::Status::OK() (Invalid argument: All components of the offset index in a gather op must either be a offset dimension or explicitly collapsed; got len(slice_sizes)=4, output_slice_sizes=1,2,3, collapsed_slice_dims=. vs. OK)
*** Begin stack trace ***
tensorflow::CurrentStackTrace[abi:cxx11]()
xla::Shape ConsumeValue<xla::Shape>(stream_executor::port::StatusOr<xla::Shape>&&)
torch_xla::XlaHelpers::ShapeOfXlaOp(xla::XlaOp const&)
torch_xla::ir::ops::InferOutputShape(absl::Span<xla::Shape const>, std::function<xla::XlaOp (absl::Span<xla::XlaOp const>)> const&)
std::function<xla::Shape ()>::operator()() const
torch_xla::ir::Node::GetOpShape(std::function<xla::Shape ()> const&) const
torch_xla::ir::Node::Node(torch_xla::ir::OpKind, absl::Span<torch_xla::ir::Value const>, std::function<xla::Shape ()> const&, unsigned long, unsigned long)
torch_xla::ir::ops::IndexSelect::IndexSelect(torch_xla::ir::Value const&, long long, torch_xla::ir::Value const&)
void __gnu_cxx::new_allocator<torch_xla::ir::ops::IndexSelect>::construct<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::ops::IndexSelect*, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
void std::allocator_traits<std::allocator<torch_xla::ir::ops::IndexSelect> >::construct<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect>&, torch_xla::ir::ops::IndexSelect*, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::_Sp_counted_ptr_inplace<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, (__gnu_cxx::_Lock_policy)2>::_Sp_counted_ptr_inplace<torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::__shared_count<(__gnu_cxx::_Lock_policy)2>::__shared_count<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, torch_xla::ir::ops::IndexSelect*, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::__shared_ptr<torch_xla::ir::ops::IndexSelect, (__gnu_cxx::_Lock_policy)2>::__shared_ptr<std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect>::shared_ptr<std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect> std::allocate_shared<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect> std::make_shared<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::Node> torch_xla::ir::MakeNode<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
torch_xla::XLATensor::index_select(torch_xla::XLATensor const&, long long, torch_xla::XLATensor const&)
torch_xla::AtenXlaType::index_select(at::Tensor const&, long, at::Tensor const&)
torch::autograd::VariableType::index_select(at::Tensor const&, long, at::Tensor const&)
at::Tensor::index_select(long, at::Tensor const&) const
_PyCFunction_FastCallDict
_PyEval_EvalFrameDefault
PyEval_EvalCodeEx
PyEval_EvalCode
PyRun_FileExFlags
PyRun_SimpleFileExFlags
Py_Main
main
__libc_start_main
*** End stack trace ***
Traceback (most recent call last):
File "repro.py", line 10, in <module>
xla_result = a.index_select(0, ind_empty)
RuntimeError: /home/ubuntu/pytorch_maskrcnn/xla_maskrcnn/third_party/tensorflow/bazel-tensorflow/tensorflow/compiler/xla/xla_client/debug_macros.h:27 : Check failed: status.status() == ::tensorflow::Status::OK() (Invalid argument: All components of the offset index in a gather op must either be a offset dimension or explicitly collapsed; got len(slice_sizes)=4, output_slice_sizes=1,2,3, collapsed_slice_dims=. vs. OK)
*** Begin stack trace ***
tensorflow::CurrentStackTrace[abi:cxx11]()
xla::Shape ConsumeValue<xla::Shape>(stream_executor::port::StatusOr<xla::Shape>&&)
torch_xla::XlaHelpers::ShapeOfXlaOp(xla::XlaOp const&)
torch_xla::ir::ops::InferOutputShape(absl::Span<xla::Shape const>, std::function<xla::XlaOp (absl::Span<xla::XlaOp const>)> const&)
std::function<xla::Shape ()>::operator()() const
torch_xla::ir::Node::GetOpShape(std::function<xla::Shape ()> const&) const
torch_xla::ir::Node::Node(torch_xla::ir::OpKind, absl::Span<torch_xla::ir::Value const>, std::function<xla::Shape ()> const&, unsigned long, unsigned long)
torch_xla::ir::ops::IndexSelect::IndexSelect(torch_xla::ir::Value const&, long long, torch_xla::ir::Value const&)
void __gnu_cxx::new_allocator<torch_xla::ir::ops::IndexSelect>::construct<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::ops::IndexSelect*, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
void std::allocator_traits<std::allocator<torch_xla::ir::ops::IndexSelect> >::construct<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect>&, torch_xla::ir::ops::IndexSelect*, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::_Sp_counted_ptr_inplace<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, (__gnu_cxx::_Lock_policy)2>::_Sp_counted_ptr_inplace<torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::__shared_count<(__gnu_cxx::_Lock_policy)2>::__shared_count<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, torch_xla::ir::ops::IndexSelect*, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::__shared_ptr<torch_xla::ir::ops::IndexSelect, (__gnu_cxx::_Lock_policy)2>::__shared_ptr<std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect>::shared_ptr<std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::_Sp_make_shared_tag, std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect> std::allocate_shared<torch_xla::ir::ops::IndexSelect, std::allocator<torch_xla::ir::ops::IndexSelect>, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(std::allocator<torch_xla::ir::ops::IndexSelect> const&, torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::ops::IndexSelect> std::make_shared<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
std::shared_ptr<torch_xla::ir::Node> torch_xla::ir::MakeNode<torch_xla::ir::ops::IndexSelect, torch_xla::ir::Value, long long, torch_xla::ir::Value&>(torch_xla::ir::Value&&, long long&&, torch_xla::ir::Value&)
torch_xla::XLATensor::index_select(torch_xla::XLATensor const&, long long, torch_xla::XLATensor const&)
torch_xla::AtenXlaType::index_select(at::Tensor const&, long, at::Tensor const&)
torch::autograd::VariableType::index_select(at::Tensor const&, long, at::Tensor const&)
at::Tensor::index_select(long, at::Tensor const&) const
_PyCFunction_FastCallDict
_PyEval_EvalFrameDefault
PyEval_EvalCodeEx
PyEval_EvalCode
PyRun_FileExFlags
PyRun_SimpleFileExFlags
Py_Main
main
__libc_start_main
*** End stack trace ***
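
For context, a minimal script along the lines of the `repro.py` referenced in the traceback might look like the sketch below. This is not the original repro: the tensor shape, dtype, and setup are assumptions inferred from the error message (`len(slice_sizes)=4` suggests a 4-D input) and from the failing call `a.index_select(0, ind_empty)`, where the name suggests an empty index tensor.

```python
# Hypothetical repro sketch -- not the original repro.py from this gist.
# Assumes a 4-D input tensor and an empty int64 index tensor, matching the
# failing call `a.index_select(0, ind_empty)` in the traceback above.
import torch
import torch_xla.core.xla_model as xm

device = xm.xla_device()  # CPU-backed XLA device, per the XRT log above

a = torch.rand(4, 1, 2, 3).to(device)                    # assumed 4-D shape
ind_empty = torch.empty(0, dtype=torch.long).to(device)  # empty index tensor

# Triggers the gather-op shape check inside torch_xla's IndexSelect lowering.
xla_result = a.index_select(0, ind_empty)
print(xla_result.shape)
```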