GitHub Gists by Davide Libenzi (dlibenzi)
static void xla_set_data(Tensor & self, Tensor new_data) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _w_new_data = new_data.alias().ToMutableTensor();
  auto&& __result = set_data(_w_self, _w_new_data);
  (void) __result;  // Avoid warnings in case not used; the wrapper is void, so nothing is returned
}
static Tensor & xla__th_set_(Tensor & self, Storage source) {
  auto _w_self = self.alias().ToMutableTensor();
  // ... (fragment truncated in the gist)
// Autogenerated file by gen.py
static void xla_set_data(Tensor & self, Tensor new_data) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _w_new_data = new_data.alias().ToMutableTensor();
  auto&& __result = at::set_data(_w_self, _w_new_data);
  (void) __result;  // Avoid warnings in case not used; the wrapper is void, so nothing is returned
}
// Autogenerated file by gen.py
static void xla_set_data(Tensor & self, Tensor new_data) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _w_new_data = new_data.alias().ToMutableTensor();
  at::set_data(_w_self, _w_new_data);
}
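The first two generated variants above return __result from a wrapper declared void, which is ill-formed C++; the third variant drops the return and invokes the op purely for its side effect. A minimal Python sketch of the generator-side fix follows, using a hypothetical emit_op_call helper (the fragments do not show gen.py's actual emission logic):

def emit_op_call(op_call, return_type):
    # Hypothetical helper: a void wrapper gets a bare call, while a
    # value-returning wrapper captures and returns the op's result.
    # This mirrors the difference between the broken and fixed
    # variants of xla_set_data above.
    if return_type == 'void':
        return '  {};'.format(op_call)
    return ('  auto&& __result = {};\n'
            '  return __result;').format(op_call)

For example, emit_op_call('at::set_data(_w_self, _w_new_data)', 'void') yields the bare call seen in the corrected wrapper.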
static Tensor & xla__th_set_(Tensor & self, Storage source) {
  auto _w_self = self.alias().ToMutableTensor();
  // ... (file truncated here)
// Autogenerated file by gen.py. Do not edit directly!
#include "aten_xla_bridge.h"
#include <ATen/Context.h>
#include <ATen/CPUGenerator.h>
#include <ATen/TypeDefault.h>
namespace torch_xla {
Thread 4576 (Thread 0x7fff927dc700 (LWP 12707)):
#0 0x00007ffff7bcb556 in futex_abstimed_wait_cancelable (private=0, abstime=0x0, expected=0, futex_word=0x7fffc4e234f0) at ../sysdeps/unix/sysv/linux/futex-internal.h:205
#1 do_futex_wait (sem=sem@entry=0x7fffc4e234f0, abstime=0x0) at sem_waitcommon.c:111
#2 0x00007ffff7bcb604 in __new_sem_wait_slow (sem=0x7fffc4e234f0, abstime=0x0) at sem_waitcommon.c:181
#3 0x000055555563ff76 in PyThread_acquire_lock_timed () at /tmp/build/80754af9/python_1546130271559/work/Python/thread_pthread.h:386
#4 0x00005555556d21ac in acquire_timed (timeout=-1000000000, lock=0x7fffc4e234f0) at /tmp/build/80754af9/python_1546130271559/work/Modules/_threadmodule.c:68
#5 lock_PyThread_acquire_lock () at /tmp/build/80754af9/python_1546130271559/work/Modules/_threadmodule.c:151
#6 0x0000555555665744 in _PyCFunction_FastCallDict () at /tmp/build/80754af9/python_1546130271559/work/Objects/methodobject.c:231
#7 0x00005555556ec42c in call_function () at /tmp/build/80754af9/python_1546
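The trace above shows a Python thread parked in PyThread_acquire_lock via sem_wait, captured by attaching a native debugger to the hung process. For a Python-level counterpart, the standard faulthandler module can dump every thread's Python stack from inside the process; a minimal sketch:

import faulthandler
import sys

# Print the Python-level traceback of every thread to stderr; handy
# when a process, like the one above, appears deadlocked on a lock.
faulthandler.dump_traceback(file=sys.stderr, all_threads=True)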
diff --git a/examples/xla_run_swag.py b/examples/xla_run_swag.py
index f3d2916..17b73a4 100644
--- a/examples/xla_run_swag.py
+++ b/examples/xla_run_swag.py
@@ -40,10 +40,11 @@ from pytorch_pretrained_bert.tokenization import BertTokenizer
import torch_xla
import torch_xla_py.utils as xu
import torch_xla_py.xla_model as xm
+import torch_xla_py.data_parallel as dp
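The new import pulls in torch_xla_py's multi-device training helper. A hedged usage sketch follows, assuming the dp.DataParallel class and xm.get_xla_supported_devices helper of that era's torch_xla_py API; the rest of xla_run_swag.py is not shown in this diff, and BertForMultipleChoice plus train_loader stand in for the model and DataLoader the script actually builds:

import torch_xla_py.data_parallel as dp
import torch_xla_py.xla_model as xm
from pytorch_pretrained_bert import BertForMultipleChoice

devices = xm.get_xla_supported_devices()  # one string per XLA device
# DataParallel replicates the model and runs one worker per device.
model_parallel = dp.DataParallel(
    lambda: BertForMultipleChoice.from_pretrained('bert-base-uncased',
                                                  num_choices=4),
    device_ids=devices)

def train_loop_fn(model, loader, device, context):
    # Invoked once per device; loader yields that replica's batches.
    for step, batch in enumerate(loader):
        pass  # forward/backward/optimizer step goes here

model_parallel(train_loop_fn, train_loader)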
// This constructor is non-standard, it is used by allocate_shared.
template<typename _Alloc, typename... _Args>
__shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
_Args&&... __args)
: _M_ptr(), _M_refcount(__tag, (_Tp*)0, __a,
// ... (fragment truncated in the gist)
STACK (count=24):
#0 pthread_cond_wait@@GLIBC_2.3.2 () at ../sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:185
#1 0x00007fffe89764cb in __gthread_cond_wait (__mutex=<optimized out>, __cond=<optimized out>) at /home/nwani/m3/conda-bld/compilers_linux-64_1560109574129/work/.build/x86_64-conda_cos6-linux-gnu/build/build-cc-gcc-final/x86_64-conda_cos6-linux-gnu/libstdc++-v3/include/x86_64-conda_cos6-linux-gnu/bits/gthr-default.h:878
#2 std::condition_variable::wait (this=<optimized out>, __lock=...) at /home/nwani/m3/conda-bld/compilers_linux-64_1560109574129/work/.build/x86_64-conda_cos6-linux-gnu/src/gcc/libstdc++-v3/src/c++11/condition_variable.cc:53
#3 0x00007fffb198d1ab in ?? () from /root/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch_xla/lib/libxla_computation_client.so
#4 0x00007fffe897a19d in std::execute_native_thread_routine (__p=X) at /home/nwani/m3/conda-bld/compilers_linux-64_1560109574129/work/.build/x86_64-conda_cos6-linux-gnu/src/gcc/libstdc++-v3/src/c++11/thread.cc:80
#5 0x0
STACK (count=506):
#0 0x00007ffff7bc39f3 in futex_wait_cancelable (private=<optimized out>, expected=X, futex_word=X) at ../sysdeps/unix/sysv/linux/futex-internal.h:88
#1 __pthread_cond_wait_common (abstime=X, mutex=X, cond=X) at pthread_cond_wait.c:502
#2 __pthread_cond_wait (cond=X, mutex=X) at pthread_cond_wait.c:655
#3 0x00007ffff5bfb064 in __kmp_suspend_64 () from /home/ubuntu/miniconda3/envs/maskrcnn36/lib/python3.6/site-packages/numpy/../../../libiomp5.so
#4 0x00007ffff5b6bc0d in bool _INTERNAL_25_______src_kmp_barrier_cpp_38a91946::__kmp_wait_template<kmp_flag_64, 1, false, true>(kmp_info*, kmp_flag_64*, void*) () from /home/ubuntu/miniconda3/envs/maskrcnn36/lib/python3.6/site-packages/numpy/../../../libiomp5.so
#5 0x00007ffff5b6ee8c in _INTERNAL_25_______src_kmp_barrier_cpp_38a91946::__kmp_hyper_barrier_release(barrier_type, kmp_info*, int, int, int, void*) () from /home/ubuntu/miniconda3/envs/maskrcnn36/lib/python3.6/site-packages/numpy/../../../libiomp5.so
#6 0x00007ffff5b753b2 in __kmp_fork
======================================================================
FAIL: test_chain_matmul_xla (__main__.TestTorchDeviceTypeXLA)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/dlibenzi_google_com/pytorch/test/common_device_type.py", line 127, in instantiated_test
return test(self, cls.device_type)
File "/home/dlibenzi_google_com/pytorch/xla/test/../../test/test_torch.py", line 7480, in test_chain_matmul
run_test([10, 20, 30, 5], device)
File "/home/dlibenzi_google_com/pytorch/xla/test/../../test/test_torch.py", line 7478, in run_test
self.assertEqual(torch.chain_matmul(*matrices), product(matrices))
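For reference, the failing check can be reproduced standalone. The sketch below reconstructs run_test from the traceback, assuming product is a left-to-right torch.mm reduction as in the helper test_torch.py uses:

import functools
import torch

def product(matrices):
    # Reference result: plain left-to-right matrix product
    # (assumption: mirrors the helper used by test_torch.py).
    return functools.reduce(torch.mm, matrices)

def run_test(p, device):
    # Build a chain of random matrices with compatible shapes;
    # p=[10, 20, 30, 5] gives 10x20, 20x30, and 30x5 matrices.
    matrices = [torch.randn(p[i], p[i + 1], device=device)
                for i in range(len(p) - 1)]
    expected = product(matrices)
    actual = torch.chain_matmul(*matrices)
    assert torch.allclose(actual, expected, atol=1e-4)

run_test([10, 20, 30, 5], 'cpu')  # swap in an XLA device to reproduce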