// Autogenerated file by gen.py
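//
// Every wrapper below follows the same pattern: unwrap each XLA tensor
// argument into its aliased at:: tensor (ToTensor() for read-only inputs,
// ToMutableTensor() for in-place and output arguments), forward the call to
// the matching at::_th_* kernel, then either return the original reference
// (for in-place and _out variants) or re-wrap the result as an XLA tensor on
// the source tensor's device via CreateXlaTensor(..., XlaTensorDevice(self)).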
static void xla_set_data(Tensor & self, Tensor new_data) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _w_new_data = new_data.alias().ToMutableTensor();
  at::set_data(_w_self, _w_new_data);
}
static Tensor & xla__th_set_(Tensor & self, Storage source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_set_(_w_self, source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_set__1(Tensor & self, Storage source, int64_t storage_offset, IntList size, IntList stride) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_set_(_w_self, source, storage_offset, size, stride);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_set__2(Tensor & self, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::_th_set_(_w_self, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_set__3(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_set_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_fill_(Tensor & self, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_fill_(_w_self, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_fill__1(Tensor & self, const Tensor & value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_value = value.alias().ToTensor();
  auto&& __result = at::_th_fill_(_w_self, _r_value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static bool xla__th_is_set_to(const Tensor & self, const Tensor & tensor) {
  auto _r_self = self.alias().ToTensor();
  auto _r_tensor = tensor.alias().ToTensor();
  auto&& __result = at::_th_is_set_to(_r_self, _r_tensor);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Tensor & xla__th_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::_th_masked_fill_(_w_self, _r_mask, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
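// Note: the xla_s__th_* wrappers below mirror their xla__th_* counterparts
// but forward to the at::s__th_* entry points (presumably the
// non-broadcasting, same-size kernels that the broadcasting _th_* variants
// dispatch to once shapes have been expanded).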
static Tensor & xla_s__th_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::s__th_masked_fill_(_w_self, _r_mask, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_masked_fill__1(Tensor & self, const Tensor & mask, const Tensor & value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto _r_value = value.alias().ToTensor();
  auto&& __result = at::_th_masked_fill_(_w_self, _r_mask, _r_value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_masked_fill__1(Tensor & self, const Tensor & mask, const Tensor & value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto _r_value = value.alias().ToTensor();
  auto&& __result = at::s__th_masked_fill_(_w_self, _r_mask, _r_value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::_th_masked_scatter_(_w_self, _r_mask, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::s__th_masked_scatter_(_w_self, _r_mask, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_masked_select_out(Tensor & result, const Tensor & self, const Tensor & mask) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::_th_masked_select_out(_w_result, _r_self, _r_mask);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_masked_select_out(Tensor & result, const Tensor & self, const Tensor & mask) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::s__th_masked_select_out(_w_result, _r_self, _r_mask);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_masked_select(const Tensor & self, const Tensor & mask) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::_th_masked_select(_r_self, _r_mask);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_masked_select(const Tensor & self, const Tensor & mask) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::s__th_masked_select(_r_self, _r_mask);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_nonzero_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_nonzero_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_nonzero(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_nonzero(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_clone(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clone(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_view(const Tensor & self, IntList size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_view(_r_self, size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_resize_as_(Tensor & self, const Tensor & the_template) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_the_template = the_template.alias().ToTensor();
  auto&& __result = at::_th_resize_as_(_w_self, _r_the_template);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_index_select_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_index_select_out(_w_result, _r_self, dim, _r_index);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_index_select(const Tensor & self, int64_t dim, const Tensor & index) {
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_index_select(_r_self, dim, _r_index);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::_th_index_copy_(_w_self, dim, _r_index, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_take_out(Tensor & result, const Tensor & self, const Tensor & index) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_take_out(_w_result, _r_self, _r_index);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_take(const Tensor & self, const Tensor & index) {
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_take(_r_self, _r_index);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::_th_put_(_w_self, _r_index, _r_source, accumulate);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::_th_index_add_(_w_self, dim, _r_index, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_index_fill_(_w_self, dim, _r_index, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_index_fill__1(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_value = value.alias().ToTensor();
  auto&& __result = at::_th_index_fill_(_w_self, dim, _r_index, _r_value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_unfold_out(Tensor & result, const Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_unfold_out(_w_result, _r_self, dimension, size, step);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_unfold(_r_self, dimension, size, step);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_src = src.alias().ToTensor();
  auto&& __result = at::_th_scatter_(_w_self, dim, _r_index, _r_src);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_scatter__1(Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_scatter_(_w_self, dim, _r_index, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_src = src.alias().ToTensor();
  auto&& __result = at::_th_scatter_add_(_w_self, dim, _r_index, _r_src);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_gather_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_gather_out(_w_result, _r_self, dim, _r_index);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_gather(const Tensor & self, int64_t dim, const Tensor & index) {
  auto _r_self = self.alias().ToTensor();
  auto _r_index = index.alias().ToTensor();
  auto&& __result = at::_th_gather(_r_self, dim, _r_index);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static bool xla__th_equal(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_equal(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Tensor & xla__th_and_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_and_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_and(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_and(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_and_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_and_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_and_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_and_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_and_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_and(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_and(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_and(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_iand_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_iand_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_iand__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_iand_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_iand_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_iand_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_or_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_or_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_or(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_or(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_or_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_or_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_or_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_or_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_or_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_or(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_or(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_or(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ior_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_ior_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ior__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ior_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_ior_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ior_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_xor_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_xor_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_xor(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_xor(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_xor_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_xor_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_xor_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_xor_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_xor_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_xor(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_xor(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_xor(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ixor_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_ixor_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ixor__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ixor_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_ixor_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ixor_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_lshift_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lshift_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lshift(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lshift(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lshift_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_lshift_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_lshift_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_lshift_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lshift_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_lshift(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_lshift(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_lshift(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ilshift_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_ilshift_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ilshift__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ilshift_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_ilshift_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ilshift_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_rshift_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_rshift_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_rshift(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_rshift(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_rshift_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_rshift_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_rshift_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_rshift_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_rshift_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_rshift(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_rshift(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_rshift(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_irshift_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_irshift_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_irshift__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_irshift_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_irshift_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_irshift_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_lt_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lt_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lt(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lt(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lt_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_lt_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_lt_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_lt_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lt_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_lt(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_lt(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_lt(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lt_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_lt_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_lt__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_lt_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_lt_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_lt_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_gt_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_gt_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_gt(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_gt(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_gt_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_gt_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_gt_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_gt_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_gt_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_gt(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_gt(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_gt(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_gt_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_gt_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_gt__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_gt_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_gt_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_gt_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_le_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_le_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_le(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_le(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_le_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_le_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_le_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_le_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_le_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_le(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_le(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_le(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_le_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_le_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_le__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_le_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_le_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_le_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ge_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ge_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ge(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ge(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ge_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ge_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_ge_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ge_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ge_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ge(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_ge(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ge(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ge_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_ge_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ge__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ge_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_ge_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ge_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_eq_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_eq_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_eq(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_eq(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_eq_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_eq_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_eq_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_eq_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_eq_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_eq(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_eq(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_eq(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_eq_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_eq_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_eq__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_eq_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_eq_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_eq_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ne_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ne_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ne(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ne(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ne_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ne_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_ne_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ne_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ne_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ne(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_ne(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ne(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ne_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_ne_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ne__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_ne_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_ne_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_ne_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_min_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_min_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_min_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_min_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_min(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_min(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_min(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_min(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_min_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_min(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
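// The dim-reductions below return (values, indices) pairs: the functional
// variants re-wrap each tuple element as a separate XLA tensor on the source
// device, while the _out variants return the caller-supplied references.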
static std::tuple<Tensor &,Tensor &> xla__th_min_out_1(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_min = min.alias().ToMutableTensor();
  auto _w_min_indices = min_indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_min_out(_w_min, _w_min_indices, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(min, min_indices);
}
static std::tuple<Tensor,Tensor> xla__th_min_2(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_min(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__th_max_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_max_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_max_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_max_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_max(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_max(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_max(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_max(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_max_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_max(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__th_max_out_1(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_max = max.alias().ToMutableTensor();
  auto _w_max_indices = max_indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_max_out(_w_max, _w_max_indices, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(max, max_indices);
}
static std::tuple<Tensor,Tensor> xla__th_max_2(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_max(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla__th_kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_kthvalue_out(_w_values, _w_indices, _r_self, k, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static std::tuple<Tensor,Tensor> xla__th_kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_kthvalue(_r_self, k, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla__th_mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_mode_out(_w_values, _w_indices, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static std::tuple<Tensor,Tensor> xla__th_mode(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_mode(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor xla__th_median(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_median(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__th_median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_median_out(_w_values, _w_indices, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static std::tuple<Tensor,Tensor> xla__th_median_1(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_median(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla__th_sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sort_out(_w_values, _w_indices, _r_self, dim, descending);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static std::tuple<Tensor,Tensor> xla__th_sort(const Tensor & self, int64_t dim, bool descending) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sort(_r_self, dim, descending);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla__th_topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_topk_out(_w_values, _w_indices, _r_self, k, dim, largest, sorted);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static std::tuple<Tensor,Tensor> xla__th_topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_topk(_r_self, k, dim, largest, sorted);
  (void) __result; // Avoid warnings in case not used
return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self))); | |
} | |
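// mode/median/sort/topk above carry their (dim, keepdim) / (dim,
// descending) reduction arguments straight through to the TH fallback;
// the XLA layer adds no semantics of its own here. A minimal sketch of
// the underlying call these wrappers forward to (hypothetical
// `values`/`indices` destinations, standard ATen API):
//   Tensor values = ..., indices = ...;
//   at::_th_topk_out(values, indices, self, /*k=*/5, /*dim=*/-1,
//                    /*largest=*/true, /*sorted=*/true);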
static Tensor xla__th_any(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_any(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_any_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_any_out(_w_result, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_any_1(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_any(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
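// From here on, the unary math ops (abs, sigmoid, the log family, lgamma,
// digamma, exp, expm1, the trig family, erf/erfc/erfinv, sqrt, rsqrt, the
// rounding ops, frac, ...) are all instances of a single pattern. A
// minimal sketch of the template gen.py presumably expands per op name
// OP (illustrative only, not part of the generated output):
//   static Tensor xla__th_OP(const Tensor & self) {
//     auto _r_self = self.alias().ToTensor();
//     auto&& __result = at::_th_OP(_r_self);
//     return CreateXlaTensor(__result, XlaTensorDevice(self));
//   }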
static Tensor & xla__th_abs_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_abs_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_abs(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_abs(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_sigmoid_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sigmoid_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_sigmoid(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sigmoid(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_log_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_log(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_log10_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log10_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_log10(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log10(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_log1p_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log1p_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_log1p(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log1p(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_log2_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log2_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_log2(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_log2(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lgamma_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lgamma_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lgamma(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_lgamma(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lgamma_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_lgamma_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_digamma_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_digamma_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_digamma(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_digamma(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_digamma_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_digamma_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_polygamma_out(Tensor & result, int64_t n, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_polygamma_out(_w_result, n, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_polygamma(int64_t n, const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_polygamma(n, _r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_polygamma_(Tensor & self, int64_t n) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_polygamma_(_w_self, n);
  (void) __result; // Avoid warnings in case not used
  return self;
}
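// Ops with an in-place form (trailing underscore: lgamma_, digamma_,
// polygamma_ above) get a third wrapper shape that unwraps `self`
// mutably, applies the fallback in place, and returns `self` itself (the
// XLA handle, not the unwrapped tensor), so callers keep the XLA
// identity across in-place calls.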
static Tensor & xla__th_exp_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_exp_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_exp(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_exp(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_expm1_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_expm1_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_expm1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_expm1(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_cos_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cos_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_cos(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cos(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_acos_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_acos_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_acos(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_acos(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_cosh_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cosh_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_cosh(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cosh(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_sin_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sin_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_sin(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sin(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_asin_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_asin_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_asin(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_asin(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_sinh_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sinh_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_sinh(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sinh(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_tan_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_tan_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_tan(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_tan(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_atan_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_atan_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_atan(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_atan(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_tanh_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_tanh_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_tanh(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_tanh(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_erf_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erf_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_erf(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erf(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_erfc_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erfc_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_erfc(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erfc(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_erfinv_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_erfinv_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_erfinv_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erfinv_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_erfinv(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_erfinv(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_sqrt_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sqrt_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_sqrt(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sqrt(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_rsqrt_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_rsqrt_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_rsqrt(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_rsqrt(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_ceil_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ceil_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ceil(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_ceil(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_floor_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_floor_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_floor(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_floor(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_round_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_round_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_round(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_round(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_trunc_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_trunc_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_trunc(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_trunc(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_frac_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_frac_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_frac_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_frac_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_frac(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_frac(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
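// The var/std wrappers below expose both the dim-reducing overload
// (dim, unbiased, keepdim) and the full-tensor overload (unbiased). As
// elsewhere in ATen, unbiased=true selects Bessel's correction, dividing
// by N-1 instead of N when estimating variance.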
static Tensor & xla__th_var_out(Tensor & result, const Tensor & self, int64_t dim, bool unbiased, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_var_out(_w_result, _r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_var(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_var(_r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_var_1(const Tensor & self, bool unbiased) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_var(_r_self, unbiased);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_std_out(Tensor & result, const Tensor & self, int64_t dim, bool unbiased, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_std_out(_w_result, _r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_std(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_std(_r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__th_std_1(const Tensor & self, bool unbiased) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_std(_r_self, unbiased);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_renorm_out(_w_result, _r_self, p, dim, maxnorm);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_renorm(_r_self, p, dim, maxnorm);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_renorm_(_w_self, p, dim, maxnorm);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla__th_dist(const Tensor & self, const Tensor & other, Scalar p) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_dist(_r_self, _r_other, p);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_dist(const Tensor & self, const Tensor & other, Scalar p) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_dist(_r_self, _r_other, p);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
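// _th_dist above returns the p-norm of the elementwise difference
// (self - other). Its second wrapper is the first of many s__ entries in
// this stretch: the s__ prefix mirrors ATen's TH dispatch, where the
// plain entry point may broadcast its arguments while the s_ ("same
// size") entry point expects pre-expanded, equally-shaped inputs. That
// reading is inferred from the naming convention, not stated in this
// file.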
static Tensor & xla__th_reciprocal_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_reciprocal_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_reciprocal(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_reciprocal(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_reciprocal_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_reciprocal_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_neg_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_neg_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_neg(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_neg(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_neg_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_neg_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_atan2_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_atan2_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_atan2_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_atan2_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_atan2(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_atan2(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_atan2(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_atan2(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_atan2_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_atan2_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_atan2_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_atan2_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
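// atan2(self, other) is the elementwise two-argument arctangent: it uses
// the signs of both inputs to place the angle in the correct quadrant,
// unlike atan(self / other). The wrappers above cover the out,
// functional, and in-place forms, each with its s__ twin.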
static Tensor & xla__th_pow_out(Tensor & result, const Tensor & self, Scalar exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_pow_out(_w_result, _r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_pow(const Tensor & self, Scalar exponent) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_pow(_r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_pow_out_1(Tensor & result, const Tensor & self, const Tensor & exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::_th_pow_out(_w_result, _r_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_pow_out(Tensor & result, const Tensor & self, const Tensor & exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::s__th_pow_out(_w_result, _r_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_pow_1(const Tensor & self, const Tensor & exponent) {
  auto _r_self = self.alias().ToTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::_th_pow(_r_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_pow(const Tensor & self, const Tensor & exponent) {
  auto _r_self = self.alias().ToTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::s__th_pow(_r_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_pow_out_2(Tensor & result, Scalar self, const Tensor & exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::_th_pow_out(_w_result, self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_pow_2(Scalar self, const Tensor & exponent) {
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::_th_pow(self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(exponent));
}
static Tensor & xla__th_pow_(Tensor & self, Scalar exponent) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_pow_(_w_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_pow__1(Tensor & self, const Tensor & exponent) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::_th_pow_(_w_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_pow_(Tensor & self, const Tensor & exponent) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_exponent = exponent.alias().ToTensor();
  auto&& __result = at::s__th_pow_(_w_self, _r_exponent);
  (void) __result; // Avoid warnings in case not used
  return self;
}
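// pow is the busiest family above: the numeric suffixes (_1, _2) exist
// only to keep the C++ overloads of the same ATen name apart, covering
// Tensor^Scalar, Tensor^Tensor, and Scalar^Tensor. Note that
// xla__th_pow_2 takes its result device from `exponent` via
// XlaTensorDevice(exponent), since `self` is a Scalar there and carries
// no device.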
static Tensor & xla__th_lerp_out(Tensor & result, const Tensor & self, const Tensor & end, Scalar weight) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::_th_lerp_out(_w_result, _r_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_lerp_out(Tensor & result, const Tensor & self, const Tensor & end, Scalar weight) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::s__th_lerp_out(_w_result, _r_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_lerp(const Tensor & self, const Tensor & end, Scalar weight) {
  auto _r_self = self.alias().ToTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::_th_lerp(_r_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_lerp(const Tensor & self, const Tensor & end, Scalar weight) {
  auto _r_self = self.alias().ToTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::s__th_lerp(_r_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_lerp_(Tensor & self, const Tensor & end, Scalar weight) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::_th_lerp_(_w_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_lerp_(Tensor & self, const Tensor & end, Scalar weight) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_end = end.alias().ToTensor();
  auto&& __result = at::s__th_lerp_(_w_self, _r_end, weight);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_histc_out(Tensor & result, const Tensor & self, int64_t bins, Scalar min, Scalar max) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_histc_out(_w_result, _r_self, bins, min, max);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_histc(_r_self, bins, min, max);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_zero_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_zero_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_cumsum_out(Tensor & result, const Tensor & self, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cumsum_out(_w_result, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_cumsum(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cumsum(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_cumprod_out(Tensor & result, const Tensor & self, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cumprod_out(_w_result, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_cumprod(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_cumprod(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
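// cumsum/cumprod above are scans rather than reductions: the output has
// the same shape as the input, with entry i along `dim` holding the
// running sum/product of entries 0..i. For example,
// cumsum([1, 2, 3], dim=0) yields [1, 3, 6].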
static Tensor & xla__th_sign_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sign_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_sign(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_sign(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_sign_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_sign_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla__th_trace(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_trace(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_fmod_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_fmod_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_fmod(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_fmod(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_fmod_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_fmod_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_fmod_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_fmod_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_fmod_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_fmod(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_fmod(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_fmod(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_fmod_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_fmod_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_fmod__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_fmod_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_fmod_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_fmod_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_remainder_out(Tensor & result, const Tensor & self, Scalar other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_remainder_out(_w_result, _r_self, other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_remainder(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_remainder(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_remainder_out_1(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_remainder_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_remainder_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_remainder_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_remainder_1(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_remainder(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_remainder(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_remainder(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_remainder_(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_th_remainder_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_remainder__1(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_remainder_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_s__th_remainder_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::s__th_remainder_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_clamp_out(Tensor & result, const Tensor & self, Scalar min, Scalar max) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp_out(_w_result, _r_self, min, max);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_clamp(const Tensor & self, Scalar min, Scalar max) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp(_r_self, min, max);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_clamp_min_out(Tensor & result, const Tensor & self, Scalar min) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp_min_out(_w_result, _r_self, min);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_clamp_min(const Tensor & self, Scalar min) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp_min(_r_self, min);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_clamp_max_out(Tensor & result, const Tensor & self, Scalar max) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp_max_out(_w_result, _r_self, max);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_clamp_max(const Tensor & self, Scalar max) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_clamp_max(_r_self, max);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
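// The clamp family above composes elementwise bounds:
// clamp(x, min, max) == min(max(x, min), max), while clamp_min and
// clamp_max apply only one bound each. Only the out and functional forms
// appear above; no in-place variant is generated in this section.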
static Tensor xla__th_dot(const Tensor & self, const Tensor & tensor) {
  auto _r_self = self.alias().ToTensor();
  auto _r_tensor = tensor.alias().ToTensor();
  auto&& __result = at::_th_dot(_r_self, _r_tensor);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_cross_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_cross_out(_w_result, _r_self, _r_other, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_cross(const Tensor & self, const Tensor & other, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_th_cross(_r_self, _r_other, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_diag_out(Tensor & result, const Tensor & self, int64_t diagonal) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_diag_out(_w_result, _r_self, diagonal);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_diag(const Tensor & self, int64_t diagonal) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_th_diag(_r_self, diagonal);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
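// The remaining wrappers cover the TH BLAS-style ops. They share the
// beta/alpha convention: the result is beta * self + alpha * <product>,
// where <product> is mat1 @ mat2 for addmm, mat @ vec for addmv, and the
// outer product of vec1 and vec2 for addr. With beta = 1 and alpha = 1
// this reduces to a plain accumulate-add.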
static Tensor & xla__th_addmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_addmm_out(_w_result, _r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_addmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::s__th_addmm_out(_w_result, _r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_addmm(_r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::s__th_addmm(_r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_addmm_(_w_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_addmv_out(Tensor & result, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::_th_addmv_out(_w_result, _r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_addmv_out(Tensor & result, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::s__th_addmv_out(_w_result, _r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::_th_addmv(_r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::s__th_addmv(_r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::_th_addmv_(_w_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_addr_out(Tensor & result, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::_th_addr_out(_w_result, _r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_addr_out(Tensor & result, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::s__th_addr_out(_w_result, _r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::_th_addr(_r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::s__th_addr(_r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::_th_addr_(_w_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_ger_out(Tensor & result, const Tensor & self, const Tensor & vec2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::_th_ger_out(_w_result, _r_self, _r_vec2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_ger(const Tensor & self, const Tensor & vec2) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::_th_ger(_r_self, _r_vec2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_mv_out(Tensor & result, const Tensor & self, const Tensor & vec) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::_th_mv_out(_w_result, _r_self, _r_vec);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_mv(const Tensor & self, const Tensor & vec) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::_th_mv(_r_self, _r_vec);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_mm_out(Tensor & result, const Tensor & self, const Tensor & mat2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_mm_out(_w_result, _r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_mm(const Tensor & self, const Tensor & mat2) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_mm(_r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_bmm_out(Tensor & result, const Tensor & self, const Tensor & mat2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_bmm_out(_w_result, _r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_bmm(const Tensor & self, const Tensor & mat2) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::_th_bmm(_r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
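// addbmm and baddbmm below both consume the batched matrix product of
// batch1 @ batch2 but differ in output shape: addbmm sums the product
// over the batch dimension into a single matrix added to `self`, while
// baddbmm keeps the batch dimension and adds batchwise.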
static Tensor & xla__th_addbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_th_addbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_addbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::s__th_addbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_th_addbmm(_r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::s__th_addbmm(_r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__th_addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_th_addbmm_(_w_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__th_baddbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_th_baddbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_s__th_baddbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::s__th_baddbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__th_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_th_baddbmm(_r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_s__th_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
auto _r_self = self.alias().ToTensor(); | |
auto _r_batch1 = batch1.alias().ToTensor(); | |
auto _r_batch2 = batch2.alias().ToTensor(); | |
auto&& __result = at::s__th_baddbmm(_r_self, _r_batch1, _r_batch2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_addcmul_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcmul_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_s__th_addcmul_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcmul_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcmul(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_s__th_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcmul(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcmul_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_s__th_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcmul_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_addcdiv_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcdiv_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_s__th_addcdiv_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcdiv_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcdiv(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_s__th_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcdiv(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::_th_addcdiv_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_s__th_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::s__th_addcdiv_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
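// ---------------------------------------------------------------------------
// LAPACK-style factorizations and solvers (gels, trtrs, symeig, eig, svd,
// getri, potri, pstrf, qr, geqrf, orgqr, ormqr, btrifact, btrisolve). The
// tuple-returning wrappers unwrap operands as above and then rewrap every
// element of the fallback's std::tuple result as a new XLA tensor on the
// device of `self`. A minimal sketch of the rewrap step, assuming a
// two-tensor result:
//
//   auto&& r = at::_th_qr(_r_self);
//   return std::tuple<Tensor,Tensor>(
//       CreateXlaTensor(std::get<0>(r), XlaTensorDevice(self)),
//       CreateXlaTensor(std::get<1>(r), XlaTensorDevice(self)));
// ---------------------------------------------------------------------------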
static std::tuple<Tensor &,Tensor &> xla__th_gels_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::_th_gels_out(_w_res1, _w_res2, _r_self, _r_A); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_gels(const Tensor & self, const Tensor & A) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::_th_gels(_r_self, _r_A); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_trtrs_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::_th_trtrs_out(_w_res1, _w_res2, _r_self, _r_A, upper, transpose, unitriangular); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::_th_trtrs(_r_self, _r_A, upper, transpose, unitriangular); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_symeig_out(Tensor & res1, Tensor & res2, const Tensor & self, bool eigenvectors, bool upper) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_symeig_out(_w_res1, _w_res2, _r_self, eigenvectors, upper); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_symeig(const Tensor & self, bool eigenvectors, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_symeig(_r_self, eigenvectors, upper); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_eig_out(Tensor & res1, Tensor & res2, const Tensor & self, bool eigenvectors) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_eig_out(_w_res1, _w_res2, _r_self, eigenvectors); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_eig(const Tensor & self, bool eigenvectors) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_eig(_r_self, eigenvectors); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__th_svd_out(Tensor & res1, Tensor & res2, Tensor & res3, const Tensor & self, bool some, bool compute_uv) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _w_res3 = res3.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_svd_out(_w_res1, _w_res2, _w_res3, _r_self, some, compute_uv); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(res1, res2, res3); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__th_svd(const Tensor & self, bool some, bool compute_uv) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_svd(_r_self, some, compute_uv); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__th_getri_single_out(Tensor & output, const Tensor & self) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_getri_single_out(_w_output, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_getri_single(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_getri_single(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_potri_out(Tensor & output, const Tensor & self, bool upper) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_potri_out(_w_output, _r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_potri(const Tensor & self, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_potri(_r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_pstrf_out(Tensor & res1, Tensor & res2, const Tensor & self, bool upper, Scalar tol) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_pstrf_out(_w_res1, _w_res2, _r_self, upper, tol); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_pstrf(const Tensor & self, bool upper, Scalar tol) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_pstrf(_r_self, upper, tol); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_qr_out(Tensor & res1, Tensor & res2, const Tensor & self) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_qr_out(_w_res1, _w_res2, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_qr(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_qr(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_geqrf_out(Tensor & res1, Tensor & res2, const Tensor & self) { | |
auto _w_res1 = res1.alias().ToMutableTensor(); | |
auto _w_res2 = res2.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_geqrf_out(_w_res1, _w_res2, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(res1, res2); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_geqrf(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_geqrf(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__th_orgqr_out(Tensor & result, const Tensor & self, const Tensor & input2) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::_th_orgqr_out(_w_result, _r_self, _r_input2); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_orgqr(const Tensor & self, const Tensor & input2) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::_th_orgqr(_r_self, _r_input2); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_ormqr_out(Tensor & result, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto _r_input3 = input3.alias().ToTensor(); | |
auto&& __result = at::_th_ormqr_out(_w_result, _r_self, _r_input2, _r_input3, left, transpose); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto _r_input3 = input3.alias().ToTensor(); | |
auto&& __result = at::_th_ormqr(_r_self, _r_input2, _r_input3, left, transpose); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__th_btrifact_out(Tensor & result, Tensor & pivots, const Tensor & self, bool pivot) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _w_pivots = pivots.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_btrifact_out(_w_result, _w_pivots, _r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(result, pivots); | |
} | |
static std::tuple<Tensor,Tensor> xla__th_btrifact(const Tensor & self, bool pivot) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_btrifact(_r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__th_btrifact_with_info_out(Tensor & result, Tensor & pivots, Tensor & info, const Tensor & self, bool pivot) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _w_pivots = pivots.alias().ToMutableTensor(); | |
auto _w_info = info.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_btrifact_with_info_out(_w_result, _w_pivots, _w_info, _r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(result, pivots, info); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__th_btrifact_with_info(const Tensor & self, bool pivot) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_btrifact_with_info(_r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__th_btrisolve_out(Tensor & result, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_LU_data = LU_data.alias().ToTensor(); | |
auto _r_LU_pivots = LU_pivots.alias().ToTensor(); | |
auto&& __result = at::_th_btrisolve_out(_w_result, _r_self, _r_LU_data, _r_LU_pivots); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_LU_data = LU_data.alias().ToTensor(); | |
auto _r_LU_pivots = LU_pivots.alias().ToTensor(); | |
auto&& __result = at::_th_btrisolve(_r_self, _r_LU_data, _r_LU_pivots); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
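// Random-fill and sampling ops (random_, multinomial, uniform_, normal,
// cauchy_, log_normal_, exponential_, geometric_, dirichlet_grad). The
// Generator* argument is forwarded to the fallback untouched; the in-place
// variants mutate the unwrapped _w_self and hand back the original XLA
// reference, so callers keep operating on the same XLA tensor.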
static Tensor & xla__th_random_(Tensor & self, int64_t from, int64_t to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_random_(_w_self, from, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_random__1(Tensor & self, int64_t to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_random_(_w_self, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_random__2(Tensor & self, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_random_(_w_self, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_multinomial_out(Tensor & result, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_multinomial_out(_w_result, _r_self, num_samples, replacement, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__th_multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_multinomial(_r_self, num_samples, replacement, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_uniform_(Tensor & self, double from, double to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_uniform_(_w_self, from, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_mean = mean.alias().ToTensor(); | |
auto&& __result = at::_th_normal_out(_w_output, _r_mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_normal(const Tensor & mean, double std, Generator * generator) { | |
auto _r_mean = mean.alias().ToTensor(); | |
auto&& __result = at::_th_normal(_r_mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(mean)); | |
} | |
static Tensor & xla__th_normal_out_1(Tensor & output, double mean, const Tensor & std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::_th_normal_out(_w_output, mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_normal_1(double mean, const Tensor & std, Generator * generator) { | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::_th_normal(mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(std)); | |
} | |
static Tensor & xla__th_normal_out_2(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_mean = mean.alias().ToTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::_th_normal_out(_w_output, _r_mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_normal_2(const Tensor & mean, const Tensor & std, Generator * generator) { | |
auto _r_mean = mean.alias().ToTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::_th_normal(_r_mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(std)); | |
} | |
static Tensor & xla__th_normal_(Tensor & self, double mean, double std, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_normal_(_w_self, mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_cauchy_(Tensor & self, double median, double sigma, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_cauchy_(_w_self, median, sigma, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_log_normal_(Tensor & self, double mean, double std, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_log_normal_(_w_self, mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_exponential_(Tensor & self, double lambd, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_exponential_(_w_self, lambd, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_geometric_(Tensor & self, double p, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_th_geometric_(_w_self, p, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__th_dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_x = x.alias().ToTensor(); | |
auto _r_alpha = alpha.alias().ToTensor(); | |
auto _r_total = total.alias().ToTensor(); | |
auto&& __result = at::_th_dirichlet_grad_out(_w_output, _r_x, _r_alpha, _r_total); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__th_dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
auto _r_x = x.alias().ToTensor(); | |
auto _r_alpha = alpha.alias().ToTensor(); | |
auto _r_total = total.alias().ToTensor(); | |
auto&& __result = at::_th_dirichlet_grad(_r_x, _r_alpha, _r_total); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(total)); | |
} | |
static Tensor xla__th_alias(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_th_alias(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__th_copy_ignoring_overlaps_(Tensor & self, const Tensor & src) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_src = src.alias().ToTensor(); | |
auto&& __result = at::_th_copy_ignoring_overlaps_(_w_self, _r_src); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
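// Ops taking a TensorList (cat) cannot unwrap operands one at a time;
// instead the whole list goes through XlaCreateTensorList, which presumably
// converts it in a single pass, and the value-returning variant derives its
// target device from the list itself via XlaTensorDevice(tensors).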
static Tensor & xla__th_cat_out(Tensor & self, TensorList tensors, int64_t dim) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _l_tensors = XlaCreateTensorList(tensors); | |
auto&& __result = at::_th_cat_out(_w_self, _l_tensors, dim); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla__th_cat(TensorList tensors, int64_t dim) { | |
auto _l_tensors = XlaCreateTensorList(tensors); | |
auto&& __result = at::_th_cat(_l_tensors, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(tensors)); | |
} | |
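// ---------------------------------------------------------------------------
// THNN loss kernels (binary_cross_entropy, l1, mse, multi_margin,
// multilabel_margin, nll, nll2d, smooth_l1, soft_margin). Each loss comes as
// a forward/backward pair, each with an _out variant that writes into a
// caller-provided buffer: forward maps (self, target, ...) to an output (or
// an (output, aux) tuple), backward maps (grad_output, self, target, ...) to
// grad_input, with `reduction` and any weight tensors forwarded verbatim to
// the at::_thnn_* fallback.
// ---------------------------------------------------------------------------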
static Tensor & xla__thnn_binary_cross_entropy_forward_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_binary_cross_entropy_forward_out(_w_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_binary_cross_entropy_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_binary_cross_entropy_forward(_r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_binary_cross_entropy_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_binary_cross_entropy_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_l1_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_l1_loss_forward_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_l1_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_l1_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_l1_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_l1_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_mse_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_mse_loss_forward_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_mse_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_mse_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_mse_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_mse_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_multi_margin_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_multi_margin_loss_forward_out(_w_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_multi_margin_loss_forward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_multi_margin_loss_forward(_r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_multi_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_multi_margin_loss_backward(_r_grad_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__thnn_multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_is_target = is_target.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_multilabel_margin_loss_forward_out(_w_output, _w_is_target, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, is_target); | |
} | |
static std::tuple<Tensor,Tensor> xla__thnn_multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_multilabel_margin_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__thnn_multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_is_target = is_target.alias().ToTensor(); | |
auto&& __result = at::_thnn_multilabel_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction, _r_is_target); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_is_target = is_target.alias().ToTensor(); | |
auto&& __result = at::_thnn_multilabel_margin_loss_backward(_r_grad_output, _r_self, _r_target, reduction, _r_is_target); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__thnn_nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_total_weight = total_weight.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss_forward_out(_w_output, _w_total_weight, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, total_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla__thnn_nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss_forward(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__thnn_nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__thnn_nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_total_weight = total_weight.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss2d_forward_out(_w_output, _w_total_weight, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, total_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla__thnn_nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss2d_forward(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__thnn_nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_nll_loss2d_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_smooth_l1_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_smooth_l1_loss_forward_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_smooth_l1_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_smooth_l1_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_smooth_l1_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_smooth_l1_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_soft_margin_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_soft_margin_loss_forward_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_soft_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_soft_margin_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_soft_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::_thnn_soft_margin_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
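// THNN activation kernels (elu, glu, hardtanh, leaky_relu). Same
// forward/backward/_out scheme as the losses above, plus in-place entries
// (e.g. xla__thnn_elu_) that mutate _w_self and return the incoming XLA
// tensor reference unchanged.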
static Tensor & xla__thnn_elu_forward_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_elu_forward_out(_w_output, _r_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_elu_forward(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_elu_forward(_r_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto&& __result = at::_thnn_elu_backward_out(_w_grad_input, _r_grad_output, alpha, scale, input_scale, _r_output); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto&& __result = at::_thnn_elu_backward(_r_grad_output, alpha, scale, input_scale, _r_output); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(output)); | |
} | |
static Tensor & xla__thnn_elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_thnn_elu_(_w_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__thnn_elu_forward_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::_thnn_elu_forward_(_w_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla__thnn_glu_forward_out(Tensor & output, const Tensor & self, int64_t dim) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_glu_forward_out(_w_output, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_glu_forward(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_glu_forward(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_glu_backward_out(_w_grad_input, _r_grad_output, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_glu_backward(_r_grad_output, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_hardtanh_forward_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_hardtanh_forward_out(_w_output, _r_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_hardtanh_forward(const Tensor & self, Scalar min_val, Scalar max_val) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_hardtanh_forward(_r_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_hardtanh_backward_out(_w_grad_input, _r_grad_output, _r_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_hardtanh_backward(_r_grad_output, _r_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_thnn_hardtanh_(_w_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__thnn_hardtanh_forward_(Tensor & self, Scalar min_val, Scalar max_val) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_thnn_hardtanh_forward_(_w_self, min_val, max_val);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__thnn_leaky_relu_forward_out(Tensor & output, const Tensor & self, Scalar negative_slope) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_leaky_relu_forward_out(_w_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_leaky_relu_forward(const Tensor & self, Scalar negative_slope) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_leaky_relu_forward(_r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_leaky_relu_backward_out(_w_grad_input, _r_grad_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_leaky_relu_backward(_r_grad_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_leaky_relu_(Tensor & self, Scalar negative_slope) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_thnn_leaky_relu_(_w_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__thnn_leaky_relu_forward_(Tensor & self, Scalar negative_slope) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_thnn_leaky_relu_forward_(_w_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return self;
}
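// log_sigmoid is the first op below whose forward returns a tuple: the
// activation plus an auxiliary `buffer` tensor that the backward consumes.
// Tuple results are unpacked element-wise, and each element is wrapped as
// its own XLA tensor on the same device.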
static std::tuple<Tensor &,Tensor &> xla__thnn_log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_buffer = buffer.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_log_sigmoid_forward_out(_w_output, _w_buffer, _r_self);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, buffer);
}
static std::tuple<Tensor,Tensor> xla__thnn_log_sigmoid_forward(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_log_sigmoid_forward(_r_self);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_buffer = buffer.alias().ToTensor();
  auto&& __result = at::_thnn_log_sigmoid_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_buffer);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_buffer = buffer.alias().ToTensor();
  auto&& __result = at::_thnn_log_sigmoid_backward(_r_grad_output, _r_self, _r_buffer);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
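// The rrelu_with_noise wrappers pass the Generator* through untouched, so
// random number generation stays inside the underlying at:: kernel. The
// backward variants take no generator: the gradient reuses the recorded
// `noise` tensor instead of re-sampling.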
static Tensor & xla__thnn_rrelu_with_noise_forward_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_forward_out(_w_output, _r_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_forward(_r_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_noise, lower, upper, training);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_backward(_r_grad_output, _r_self, _r_noise, lower, upper, training);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_(_w_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__thnn_rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::_thnn_rrelu_with_noise_forward_(_w_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__thnn_softplus_forward_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softplus_forward_out(_w_output, _r_self, beta, threshold);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_softplus_forward(const Tensor & self, Scalar beta, Scalar threshold) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softplus_forward(_r_self, beta, threshold);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_softplus_backward_out(_w_grad_input, _r_grad_output, _r_self, beta, threshold, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_softplus_backward(_r_grad_output, _r_self, beta, threshold, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_softshrink_forward_out(Tensor & output, const Tensor & self, Scalar lambd) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softshrink_forward_out(_w_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_softshrink_forward(const Tensor & self, Scalar lambd) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softshrink_forward(_r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softshrink_backward_out(_w_grad_input, _r_grad_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_softshrink_backward(_r_grad_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
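// Pooling wrappers. The max-pooling forwards below return (output, indices)
// pairs and their backwards consume the recorded `indices`; average pooling
// keeps no indices and returns a single tensor.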
static Tensor & xla__thnn_adaptive_avg_pool3d_forward_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_avg_pool3d_forward_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_adaptive_avg_pool3d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_avg_pool3d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_avg_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_avg_pool3d_backward(_r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__thnn_adaptive_max_pool2d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool2d_forward_out(_w_output, _w_indices, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla__thnn_adaptive_max_pool2d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool2d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool2d_backward(_r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__thnn_adaptive_max_pool3d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool3d_forward_out(_w_output, _w_indices, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla__thnn_adaptive_max_pool3d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool3d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_adaptive_max_pool3d_backward(_r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_avg_pool2d_forward_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool2d_forward_out(_w_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_avg_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool2d_forward(_r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool2d_backward(_r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_avg_pool3d_forward_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool3d_forward_out(_w_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_avg_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool3d_forward(_r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_avg_pool3d_backward(_r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__thnn_max_pool2d_with_indices_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool2d_with_indices_forward_out(_w_output, _w_indices, _r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla__thnn_max_pool2d_with_indices_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool2d_with_indices_forward(_r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool2d_with_indices_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool2d_with_indices_backward(_r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla__thnn_max_pool3d_with_indices_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool3d_with_indices_forward_out(_w_output, _w_indices, _r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla__thnn_max_pool3d_with_indices_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool3d_with_indices_forward(_r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool3d_with_indices_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_pool3d_with_indices_backward(_r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
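// max_unpool inverts a prior max-pool: it scatters the pooled values back
// through the `indices` produced by the pooling forward, with the target
// spatial `output_size` given explicitly (plus stride/padding in the 3d
// case).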
static Tensor & xla__thnn_max_unpool2d_forward_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool2d_forward_out(_w_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_max_unpool2d_forward(const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool2d_forward(_r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool2d_backward(_r_grad_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_max_unpool3d_forward_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool3d_forward_out(_w_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_max_unpool3d_forward(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool3d_forward(_r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::_thnn_max_unpool3d_backward(_r_grad_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
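// Upsample wrappers. The backward signatures carry only grad_output plus
// static output_size/input_size metadata and have no `self` argument, so the
// generated code anchors the result's device on grad_output via
// XlaTensorDevice(grad_output).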
static Tensor & xla__thnn_upsample_linear1d_forward_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_linear1d_forward_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_linear1d_forward(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_linear1d_forward(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_linear1d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_linear1d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_bilinear2d_forward_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bilinear2d_forward_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_bilinear2d_forward(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bilinear2d_forward(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bilinear2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bilinear2d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_bicubic2d_forward_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bicubic2d_forward_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_bicubic2d_forward(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bicubic2d_forward(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bicubic2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_bicubic2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_bicubic2d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_trilinear3d_forward_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_trilinear3d_forward_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_trilinear3d_forward(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_trilinear3d_forward(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_trilinear3d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_trilinear3d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_nearest1d_forward_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest1d_forward_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_nearest1d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest1d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest1d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_nearest1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest1d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_nearest2d_forward_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest2d_forward_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_nearest2d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest2d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_nearest2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest2d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla__thnn_upsample_nearest3d_forward_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest3d_forward_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_upsample_nearest3d_forward(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest3d_forward(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest3d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_upsample_nearest3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::_thnn_upsample_nearest3d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
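// sigmoid and tanh backwards are expressed in terms of the saved forward
// `output` rather than the input (e.g. d/dx tanh(x) = 1 - tanh(x)^2), which
// is why the wrappers below take `output` and anchor the result's device on
// it.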
static Tensor & xla__thnn_sigmoid_forward_out(Tensor & output, const Tensor & self) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_sigmoid_forward_out(_w_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_sigmoid_forward(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_sigmoid_forward(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_sigmoid_backward_out(_w_grad_input, _r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_sigmoid_backward(const Tensor & grad_output, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_sigmoid_backward(_r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(output));
}
static Tensor & xla__thnn_tanh_forward_out(Tensor & output, const Tensor & self) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_tanh_forward_out(_w_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla__thnn_tanh_forward(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_thnn_tanh_forward(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__thnn_tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_tanh_backward_out(_w_grad_input, _r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla__thnn_tanh_backward(const Tensor & grad_output, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_thnn_tanh_backward(_r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(output));
}
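// Convolution wrappers. The forwards return (output, columns, ones) for
// transposed 2d and (output, finput, fgrad_input) otherwise; the trailing
// tensors are presumably im2col-style workspace buffers that the matching
// backward consumes. The backwards return (grad_input, grad_weight,
// grad_bias), with the std::array<bool,3> output_mask selecting which
// gradients are actually needed.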
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_columns = columns.alias().ToMutableTensor();
  auto _w_ones = ones.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose2d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _w_grad_weight = grad_weight.alias().ToMutableTensor();
  auto _w_grad_bias = grad_bias.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_columns = columns.alias().ToTensor();
  auto _r_ones = ones.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_columns, _r_ones);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_columns = columns.alias().ToTensor();
  auto _r_ones = ones.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_columns, _r_ones, output_mask);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_finput = finput.alias().ToMutableTensor();
  auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose3d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _w_grad_weight = grad_weight.alias().ToMutableTensor();
  auto _w_grad_bias = grad_bias.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_finput = finput.alias().ToTensor();
  auto _r_fgrad_input = fgrad_input.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_finput, _r_fgrad_input);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_finput = finput.alias().ToTensor();
  auto _r_fgrad_input = fgrad_input.alias().ToTensor();
  auto&& __result = at::_thnn_conv_transpose3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_finput, _r_fgrad_input, output_mask);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_finput = finput.alias().ToMutableTensor();
  auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv2d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_thnn_conv2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _w_grad_weight = grad_weight.alias().ToMutableTensor();
  auto _w_grad_bias = grad_bias.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_finput = finput.alias().ToTensor();
  auto _r_fgrad_input = fgrad_input.alias().ToTensor();
  auto&& __result = at::_thnn_conv2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias);
}
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_finput = finput.alias().ToTensor();
  auto _r_fgrad_input = fgrad_input.alias().ToTensor();
  auto&& __result = at::_thnn_conv2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input, output_mask);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static Tensor & xla__thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_depthwise2d_forward_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_depthwise2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla__thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_depthwise2d_backward_out(_w_grad_input, _w_grad_weight, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(grad_input, grad_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla__thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_depthwise2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_finput = finput.alias().ToMutableTensor(); | |
auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv3d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_columns = columns.alias().ToMutableTensor(); | |
auto _w_ones = ones.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated2d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_columns = columns.alias().ToMutableTensor(); | |
auto _w_ones = ones.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated3d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla__thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::_thnn_conv_dilated3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(self))); | |
} | |
static Tensor & xla__thnn_col2im_forward_out(Tensor & output, const Tensor & self, IntList output_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_col2im_forward_out(_w_output, _r_self, output_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_col2im_forward(const Tensor & self, IntList output_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_col2im_forward(_r_self, output_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::_thnn_col2im_backward_out(_w_grad_input, _r_grad_output, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_col2im_backward(const Tensor & grad_output, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::_thnn_col2im_backward(_r_grad_output, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(grad_output)); | |
} | |
static Tensor & xla__thnn_im2col_forward_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_im2col_forward_out(_w_output, _r_self, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__thnn_im2col_forward(const Tensor & self, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_thnn_im2col_forward(_r_self, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__thnn_im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList input_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::_thnn_im2col_backward_out(_w_grad_input, _r_grad_output, input_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla__thnn_im2col_backward(const Tensor & grad_output, IntList input_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::_thnn_im2col_backward(_r_grad_output, input_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(grad_output)); | |
} | |
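// Type-cast fallbacks. Every at::_cast_* operator follows the same unary
// shape: materialize the XLA tensor to a reference tensor, cast it through the
// fallback implementation (honoring non_blocking), and wrap the result back
// onto the source tensor's XLA device.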
static Tensor xla__cast_Byte(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Byte(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Char(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Char(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Double(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Double(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Float(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Float(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Int(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Int(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Long(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Long(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Short(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Short(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__cast_Half(const Tensor & self, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_cast_Half(_r_self, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
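// Dropout fallbacks. _fused_dropout produces two tensors (the dropped-out
// output and the sampled mask), so both tuple members are re-wrapped on the
// input's device; _masked_scale applies the matching rescale given a mask.
// The plain dropout variants below are simple unary forwards.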
static std::tuple<Tensor,Tensor> xla__fused_dropout(const Tensor & self, double p, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_fused_dropout(_r_self, p, generator);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor xla__masked_scale(const Tensor & self, const Tensor & mask, double scale) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mask = mask.alias().ToTensor();
  auto&& __result = at::_masked_scale(_r_self, _r_mask, scale);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__reshape_from_tensor(const Tensor & self, const Tensor & shape) {
  auto _r_self = self.alias().ToTensor();
  auto _r_shape = shape.alias().ToTensor();
  auto&& __result = at::_reshape_from_tensor(_r_self, _r_shape);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__shape_as_tensor(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_shape_as_tensor(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_dropout(const Tensor & input, double p, bool train) {
  auto _r_input = input.alias().ToTensor();
  auto&& __result = at::dropout(_r_input, p, train);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(input));
}
static Tensor & xla_dropout_(Tensor & self, double p, bool train) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::dropout_(_w_self, p, train);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_feature_dropout(const Tensor & input, double p, bool train) {
  auto _r_input = input.alias().ToTensor();
  auto&& __result = at::feature_dropout(_r_input, p, train);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(input));
}
static Tensor & xla_feature_dropout_(Tensor & self, double p, bool train) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::feature_dropout_(_w_self, p, train);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_alpha_dropout(const Tensor & input, double p, bool train) {
  auto _r_input = input.alias().ToTensor();
  auto&& __result = at::alpha_dropout(_r_input, p, train);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(input));
}
static Tensor & xla_alpha_dropout_(Tensor & self, double p, bool train) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::alpha_dropout_(_w_self, p, train);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_feature_alpha_dropout(const Tensor & input, double p, bool train) {
  auto _r_input = input.alias().ToTensor();
  auto&& __result = at::feature_alpha_dropout(_r_input, p, train);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(input));
}
static Tensor & xla_feature_alpha_dropout_(Tensor & self, double p, bool train) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::feature_alpha_dropout_(_w_self, p, train);
  (void) __result; // Avoid warnings in case not used
  return self;
}
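// Unary tensor fallbacks (abs, acos, pooling, and the math ops that follow).
// The pattern is uniform: the functional form returns a fresh XLA tensor, the
// in-place "_" form writes through the mutable alias and returns self, and the
// "_out" form writes into the caller's result buffer and returns it.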
static Tensor xla_abs(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::abs(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_abs_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::abs_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_abs_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::abs_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_acos(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::acos(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_acos_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::acos_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_acos_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::acos_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_avg_pool1d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool1d(_r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_adaptive_avg_pool1d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool1d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor,Tensor> xla_adaptive_max_pool1d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool1d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
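// Binary arithmetic fallbacks. Overloads that would otherwise collide (same
// operator name, different ATen signature) are disambiguated with numeric
// suffixes: xla_add takes a Tensor "other" while xla_add_1 takes a Scalar.
// A usage sketch (hypothetical caller, for illustration only; a and b are
// assumed to be XLA tensors):
//   at::Tensor c = xla_add(a, b, /*alpha=*/1);   // Tensor + Tensor
//   at::Tensor d = xla_add_1(a, 2, /*alpha=*/1); // Tensor + Scalar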
static Tensor xla_add(const Tensor & self, const Tensor & other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::add(_r_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_add_(Tensor & self, const Tensor & other, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::add_(_w_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_add_out(Tensor & result, const Tensor & self, const Tensor & other, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::add_out(_w_result, _r_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_add_1(const Tensor & self, Scalar other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::add(_r_self, other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_add__1(Tensor & self, Scalar other, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::add_(_w_self, other, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::addmv(_r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::addmv_(_w_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_addmv_out(Tensor & result, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat = mat.alias().ToTensor();
  auto _r_vec = vec.alias().ToTensor();
  auto&& __result = at::addmv_out(_w_result, _r_self, _r_mat, _r_vec, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::addr(_r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::addr_(_w_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_addr_out(Tensor & result, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec1 = vec1.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::addr_out(_w_result, _r_self, _r_vec1, _r_vec2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_affine_grid_generator(const Tensor & theta, IntList size) {
  auto _r_theta = theta.alias().ToTensor();
  auto&& __result = at::affine_grid_generator(_r_theta, size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(theta));
}
static Tensor xla_affine_grid_generator_backward(const Tensor & grad, IntList size) {
  auto _r_grad = grad.alias().ToTensor();
  auto&& __result = at::affine_grid_generator_backward(_r_grad, size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad));
}
static Tensor xla_all(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::all(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_all_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::all_out(_w_result, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static bool xla_allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::allclose(_r_self, _r_other, rtol, atol, equal_nan);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Tensor xla_any(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::any(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_any_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::any_out(_w_result, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
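// Factory-style "_out" fallbacks. arange_out has no tensor input to borrow a
// device from, so only the destination buffer is unwrapped and returned.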
static Tensor & xla_arange_out(Tensor & result, Scalar end) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::arange_out(_w_result, end);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_arange_out_1(Tensor & result, Scalar start, Scalar end, Scalar step) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::arange_out(_w_result, start, end, step);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__dim_arange(const Tensor & like, int64_t dim) {
  auto _r_like = like.alias().ToTensor();
  auto&& __result = at::_dim_arange(_r_like, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(like));
}
static Tensor xla_argmax(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::argmax(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_argmax_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::argmax(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__argmax(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_argmax(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_argmin(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::argmin(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_argmin_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::argmin(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__argmin(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_argmin(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_as_strided(const Tensor & self, IntList size, IntList stride, c10::optional<int64_t> storage_offset) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::as_strided(_r_self, size, stride, storage_offset);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_as_strided_(Tensor & self, IntList size, IntList stride, c10::optional<int64_t> storage_offset) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::as_strided_(_w_self, size, stride, storage_offset);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_asin(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::asin(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_asin_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::asin_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_asin_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::asin_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_atan(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::atan(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_atan_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::atan_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_atan_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::atan_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::baddbmm(_r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::baddbmm_(_w_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla__baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::_baddbmm_mkl_(_w_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_baddbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_batch1 = batch1.alias().ToTensor();
  auto _r_batch2 = batch2.alias().ToTensor();
  auto&& __result = at::baddbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
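// Note: the generator derives the result device from one of the tensor
// arguments; for batch_norm that happens to be running_var. This is only safe
// if all arguments live on the same XLA device, which the fallback path
// appears to assume.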
static Tensor xla_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto _r_running_mean = running_mean.alias().ToTensor();
  auto _r_running_var = running_var.alias().ToTensor();
  auto&& __result = at::batch_norm(_r_input, _r_weight, _r_bias, _r_running_mean, _r_running_var, training, momentum, eps, cudnn_enabled);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(running_var));
}
static Tensor xla_bernoulli(const Tensor & self, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::bernoulli(_r_self, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_bernoulli_out(Tensor & result, const Tensor & self, Generator * generator) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::bernoulli_out(_w_result, _r_self, generator);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_bernoulli_(Tensor & self, const Tensor & p, Generator * generator) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_p = p.alias().ToTensor();
  auto&& __result = at::bernoulli_(_w_self, _r_p, generator);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_bernoulli__1(Tensor & self, double p, Generator * generator) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::bernoulli_(_w_self, p, generator);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_bernoulli_1(const Tensor & self, double p, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::bernoulli(_r_self, p, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias) {
  auto _r_input1 = input1.alias().ToTensor();
  auto _r_input2 = input2.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::bilinear(_r_input1, _r_input2, _r_weight, _r_bias);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) {
  auto _r_self = self.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_pos_weight = pos_weight.alias().ToTensor();
  auto&& __result = at::binary_cross_entropy_with_logits(_r_self, _r_target, _r_weight, _r_pos_weight, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_pos_weight = pos_weight.alias().ToTensor();
  auto&& __result = at::binary_cross_entropy_with_logits_backward(_r_grad_output, _r_self, _r_target, _r_weight, _r_pos_weight, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_bincount(const Tensor & self, const Tensor & weights, int64_t minlength) {
  auto _r_self = self.alias().ToTensor();
  auto _r_weights = weights.alias().ToTensor();
  auto&& __result = at::bincount(_r_self, _r_weights, minlength);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_bmm(const Tensor & self, const Tensor & mat2) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::bmm(_r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_bmm_out(Tensor & result, const Tensor & self, const Tensor & mat2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::bmm_out(_w_result, _r_self, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
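// TensorList fallbacks. Whole lists are converted in one shot with
// XlaCreateTensorList(), and list-valued results are re-wrapped with
// CreateXlaTensors(); note that XlaTensorDevice() also accepts the list itself
// to select a device for the result.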
static std::vector<Tensor> xla_broadcast_tensors(TensorList tensors) {
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::broadcast_tensors(_l_tensors);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensors(__result);
}
static Tensor xla_cat(TensorList tensors, int64_t dim) {
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::cat(_l_tensors, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(tensors));
}
static Tensor & xla_cat_out(Tensor & result, TensorList tensors, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::cat_out(_w_result, _l_tensors, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_ceil(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::ceil(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_ceil_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::ceil_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_ceil_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::ceil_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_chain_matmul(TensorList matrices) {
  auto _l_matrices = XlaCreateTensorList(matrices);
  auto&& __result = at::chain_matmul(_l_matrices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(matrices));
}
static std::vector<Tensor> xla_chunk(const Tensor & self, int64_t chunks, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::chunk(_r_self, chunks, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensors(__result);
}
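// Clamp fallbacks. The min/max bounds are plain c10::optional<Scalar> values,
// so they pass straight through to the at:: reference implementation without
// any tensor unwrapping.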
static Tensor xla_clamp(const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp(_r_self, min, max);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_clamp_(Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::clamp_(_w_self, min, max);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_clamp_out(Tensor & result, const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp_out(_w_result, _r_self, min, max);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_clamp_max(const Tensor & self, Scalar max) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp_max(_r_self, max);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_clamp_max_(Tensor & self, Scalar max) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::clamp_max_(_w_self, max);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_clamp_max_out(Tensor & result, const Tensor & self, Scalar max) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp_max_out(_w_result, _r_self, max);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_clamp_min(const Tensor & self, Scalar min) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp_min(_r_self, min);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_clamp_min_(Tensor & self, Scalar min) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::clamp_min_(_w_self, min);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_clamp_min_out(Tensor & result, const Tensor & self, Scalar min) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clamp_min_out(_w_result, _r_self, min);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_constant_pad_nd(const Tensor & self, IntList pad, Scalar value) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::constant_pad_nd(_r_self, pad, value);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_contiguous(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::contiguous(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
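// Convolution fallbacks. For the conv* entry points the generator picks the
// bias argument as the device source for the result; as with batch_norm above,
// this assumes inputs and parameters share one XLA device.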
static Tensor xla_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::convolution(_r_input, _r_weight, _r_bias, stride, padding, dilation, transposed, output_padding, groups);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla__convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_convolution(_r_input, _r_weight, _r_bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla__convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::_convolution_nogroup(_r_input, _r_weight, _r_bias, stride, padding, dilation, transposed, output_padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static std::tuple<Tensor,Tensor,Tensor> xla__convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) {
  auto _r_ggI = ggI.alias().ToTensor();
  auto _r_ggW = ggW.alias().ToTensor();
  auto _r_ggb = ggb.alias().ToTensor();
  auto _r_gO = gO.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_convolution_double_backward(_r_ggI, _r_ggW, _r_ggb, _r_gO, _r_weight, _r_self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static Tensor xla_conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv1d(_r_input, _r_weight, _r_bias, stride, padding, dilation, groups);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv2d(_r_input, _r_weight, _r_bias, stride, padding, dilation, groups);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv3d(_r_input, _r_weight, _r_bias, stride, padding, dilation, groups);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) {
  auto _r_self = self.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv_tbc(_r_self, _r_weight, _r_bias, pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor,Tensor,Tensor> xla_conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) {
  auto _r_self = self.alias().ToTensor();
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv_tbc_backward(_r_self, _r_input, _r_weight, _r_bias, pad);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
}
static Tensor xla_conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv_transpose1d(_r_input, _r_weight, _r_bias, stride, padding, output_padding, groups, dilation);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv_transpose2d(_r_input, _r_weight, _r_bias, stride, padding, output_padding, groups, dilation);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::conv_transpose3d(_r_input, _r_weight, _r_bias, stride, padding, output_padding, groups, dilation);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor & xla_s_copy_(Tensor & self, const Tensor & src, bool non_blocking) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_src = src.alias().ToTensor(); | |
auto&& __result = at::s_copy_(_w_self, _r_src, non_blocking); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
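// In-place wrappers unwrap `self` mutably with ToMutableTensor(), let
// the underlying at:: op mutate the alias, and return the original XLA
// tensor reference so callers keep holding the wrapper object.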
static Tensor xla__s_copy_from(const Tensor & self, const Tensor & dst, bool non_blocking) {
  auto _r_self = self.alias().ToTensor();
  auto _r_dst = dst.alias().ToTensor();
  auto&& __result = at::_s_copy_from(_r_self, _r_dst, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static void xla__copy_same_type_(Tensor & self, const Tensor & src) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_src = src.alias().ToTensor();
  at::_copy_same_type_(_w_self, _r_src);
}
static Tensor xla_cos(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cos(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_cos_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::cos_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_cos_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cos_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
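// _out wrappers treat the destination like an in-place target: `result`
// is unwrapped mutably, the at:: op writes into the alias, and the
// wrapper returns the XLA-side `result` without re-wrapping.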
static Tensor xla_cosh(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cosh(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_cosh_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::cosh_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_cosh_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cosh_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) {
  auto _r_input1 = input1.alias().ToTensor();
  auto _r_input2 = input2.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto&& __result = at::cosine_embedding_loss(_r_input1, _r_input2, _r_target, margin, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(target));
}
static Tensor xla_cumsum(const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumsum(_r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_cumsum_1(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumsum(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_cumsum_out(Tensor & result, const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumsum_out(_w_result, _r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_cumsum_out_1(Tensor & result, const Tensor & self, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumsum_out(_w_result, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_cumprod(const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumprod(_r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_cumprod_1(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumprod(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_cumprod_out(Tensor & result, const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumprod_out(_w_result, _r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_cumprod_out_1(Tensor & result, const Tensor & self, int64_t dim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::cumprod_out(_w_result, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntList input_lengths, IntList target_lengths, int64_t blank, int64_t reduction) {
  auto _r_log_probs = log_probs.alias().ToTensor();
  auto _r_targets = targets.alias().ToTensor();
  auto&& __result = at::ctc_loss(_r_log_probs, _r_targets, input_lengths, target_lengths, blank, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(targets));
}
static Tensor xla_ctc_loss_1(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction) {
  auto _r_log_probs = log_probs.alias().ToTensor();
  auto _r_targets = targets.alias().ToTensor();
  auto _r_input_lengths = input_lengths.alias().ToTensor();
  auto _r_target_lengths = target_lengths.alias().ToTensor();
  auto&& __result = at::ctc_loss(_r_log_probs, _r_targets, _r_input_lengths, _r_target_lengths, blank, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(target_lengths));
}
static std::tuple<Tensor,Tensor> xla__ctc_loss(const Tensor & log_probs, const Tensor & targets, IntList input_lengths, IntList target_lengths, int64_t blank) {
  auto _r_log_probs = log_probs.alias().ToTensor();
  auto _r_targets = targets.alias().ToTensor();
  auto&& __result = at::_ctc_loss(_r_log_probs, _r_targets, input_lengths, target_lengths, blank);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(targets)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(targets)));
}
static Tensor xla__ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntList input_lengths, IntList target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_log_probs = log_probs.alias().ToTensor();
  auto _r_targets = targets.alias().ToTensor();
  auto _r_neg_log_likelihood = neg_log_likelihood.alias().ToTensor();
  auto _r_log_alpha = log_alpha.alias().ToTensor();
  auto&& __result = at::_ctc_loss_backward(_r_grad, _r_log_probs, _r_targets, input_lengths, target_lengths, _r_neg_log_likelihood, _r_log_alpha, blank);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(log_alpha));
}
static Tensor xla_det(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::det(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::diag_embed(_r_self, offset, dim1, dim2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_diagflat(const Tensor & self, int64_t offset) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::diagflat(_r_self, offset);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::diagonal(_r_self, offset, dim1, dim2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_div(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::div(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_div_(Tensor & self, const Tensor & other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::div_(_w_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_div_out(Tensor & result, const Tensor & self, const Tensor & other) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::div_out(_w_result, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_div_1(const Tensor & self, Scalar other) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::div(_r_self, other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_div__1(Tensor & self, Scalar other) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::div_(_w_self, other);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_dot(const Tensor & self, const Tensor & tensor) {
  auto _r_self = self.alias().ToTensor();
  auto _r_tensor = tensor.alias().ToTensor();
  auto&& __result = at::dot(_r_self, _r_tensor);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_dot_out(Tensor & result, const Tensor & self, const Tensor & tensor) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_tensor = tensor.alias().ToTensor();
  auto&& __result = at::dot_out(_w_result, _r_self, _r_tensor);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_einsum(std::string equation, TensorList tensors) {
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::einsum(equation, _l_tensors);
  (void) __result; // Avoid warnings in case not used
  // NOTE: `equation` is a std::string and cannot determine a device, so
  // the device is taken from the first list element here (assumes a
  // non-empty tensor list); the generator originally passed `equation`.
  return CreateXlaTensor(__result, XlaTensorDevice(tensors[0]));
}
static Tensor xla_embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
  auto _r_weight = weight.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::embedding(_r_weight, _r_indices, padding_idx, scale_grad_by_freq, sparse);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(indices));
}
static Tensor xla_embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::embedding_backward(_r_grad, _r_indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(indices));
}
static Tensor xla_embedding_dense_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::embedding_dense_backward(_r_grad, _r_indices, num_weights, padding_idx, scale_grad_by_freq);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(indices));
}
static Tensor & xla_embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::embedding_renorm_(_w_self, _r_indices, max_norm, norm_type);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::embedding_sparse_backward(_r_grad, _r_indices, num_weights, padding_idx, scale_grad_by_freq);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(indices));
}
static std::tuple<Tensor,Tensor,Tensor,Tensor> xla_embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) {
  auto _r_weight = weight.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto _r_offsets = offsets.alias().ToTensor();
  auto&& __result = at::embedding_bag(_r_weight, _r_indices, _r_offsets, scale_grad_by_freq, mode, sparse);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<3>(), XlaTensorDevice(offsets)));
}
static std::tuple<Tensor,Tensor,Tensor,Tensor> xla__embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) {
  auto _r_weight = weight.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto _r_offsets = offsets.alias().ToTensor();
  auto&& __result = at::_embedding_bag(_r_weight, _r_indices, _r_offsets, scale_grad_by_freq, mode, sparse);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<2>(), XlaTensorDevice(offsets)), CreateXlaTensor(__result.get<3>(), XlaTensorDevice(offsets)));
}
static Tensor xla__embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto _r_offsets = offsets.alias().ToTensor();
  auto _r_offset2bag = offset2bag.alias().ToTensor();
  auto _r_bag_size = bag_size.alias().ToTensor();
  auto _r_maximum_indices = maximum_indices.alias().ToTensor();
  auto&& __result = at::_embedding_bag_backward(_r_grad, _r_indices, _r_offsets, _r_offset2bag, _r_bag_size, _r_maximum_indices, num_weights, scale_grad_by_freq, mode, sparse);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(maximum_indices));
}
static Tensor xla__embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto _r_offsets = offsets.alias().ToTensor();
  auto _r_offset2bag = offset2bag.alias().ToTensor();
  auto _r_bag_size = bag_size.alias().ToTensor();
  auto&& __result = at::_embedding_bag_sparse_backward(_r_grad, _r_indices, _r_offsets, _r_offset2bag, _r_bag_size, num_weights, scale_grad_by_freq, mode);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bag_size));
}
static Tensor xla__embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto _r_offsets = offsets.alias().ToTensor();
  auto _r_offset2bag = offset2bag.alias().ToTensor();
  auto _r_bag_size = bag_size.alias().ToTensor();
  auto _r_maximum_indices = maximum_indices.alias().ToTensor();
  auto&& __result = at::_embedding_bag_dense_backward(_r_grad, _r_indices, _r_offsets, _r_offset2bag, _r_bag_size, _r_maximum_indices, num_weights, scale_grad_by_freq, mode);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(maximum_indices));
}
static Tensor xla_empty(IntList size, const TensorOptions & options) {
  auto&& __result = at::empty(size, options);
  (void) __result; // Avoid warnings in case not used
  // NOTE: `size` is an IntList and cannot determine a device; the device
  // is taken from `options` instead (assumes XlaTensorDevice accepts a
  // TensorOptions). The generator originally passed `size`.
  return CreateXlaTensor(__result, XlaTensorDevice(options));
}
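// Factory ops such as empty/empty_strided take no tensor inputs, so the
// result's placement cannot be inferred from an argument alias; the
// device is assumed to be derivable from the TensorOptions instead.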
static Tensor & xla_resize_(Tensor & self, IntList size) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::resize_(_w_self, size);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_empty_out(Tensor & result, IntList size) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::empty_out(_w_result, size);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_empty_like(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::empty_like(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_empty_strided(IntList size, IntList stride, const TensorOptions & options) {
  auto&& __result = at::empty_strided(size, stride, options);
  (void) __result; // Avoid warnings in case not used
  // NOTE: as in xla_empty above, the device comes from `options`, not
  // from the IntList `size` the generator originally passed.
  return CreateXlaTensor(__result, XlaTensorDevice(options));
}
static Tensor xla_erf(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::erf(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_erf_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::erf_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_erf_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::erf_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_erfc(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::erfc(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_erfc_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::erfc_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_erfc_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::erfc_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_exp(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::exp(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_exp_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::exp_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_exp_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::exp_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_expm1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::expm1(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_expm1_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::expm1_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_expm1_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::expm1_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_expand(const Tensor & self, IntList size, bool implicit) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::expand(_r_self, size, implicit);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_expand_as(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::expand_as(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_eye_out(Tensor & result, int64_t n) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::eye_out(_w_result, n);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_eye_out_1(Tensor & result, int64_t n, int64_t m) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::eye_out(_w_result, n, m);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_flatten(const Tensor & self, int64_t start_dim, int64_t end_dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::flatten(_r_self, start_dim, end_dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_fill_(Tensor & self, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::fill_(_w_self, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_fill__1(Tensor & self, const Tensor & value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_value = value.alias().ToTensor();
  auto&& __result = at::fill_(_w_self, _r_value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_floor(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::floor(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_floor_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::floor_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_floor_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::floor_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_full_out(Tensor & result, IntList size, Scalar fill_value) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::full_out(_w_result, size, fill_value);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_full_like(const Tensor & self, Scalar fill_value) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::full_like(_r_self, fill_value);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) {
  auto _r_input = input.alias().ToTensor();
  auto _r_grid = grid.alias().ToTensor();
  auto&& __result = at::grid_sampler(_r_input, _r_grid, interpolation_mode, padding_mode);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grid));
}
static Tensor xla_grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) {
  auto _r_input = input.alias().ToTensor();
  auto _r_grid = grid.alias().ToTensor();
  auto&& __result = at::grid_sampler_2d(_r_input, _r_grid, interpolation_mode, padding_mode);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grid));
}
static std::tuple<Tensor,Tensor> xla_grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_input = input.alias().ToTensor();
  auto _r_grid = grid.alias().ToTensor();
  auto&& __result = at::grid_sampler_2d_backward(_r_grad_output, _r_input, _r_grid, interpolation_mode, padding_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(grid)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(grid)));
}
static Tensor xla_grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) {
  auto _r_input = input.alias().ToTensor();
  auto _r_grid = grid.alias().ToTensor();
  auto&& __result = at::grid_sampler_3d(_r_input, _r_grid, interpolation_mode, padding_mode);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grid));
}
static std::tuple<Tensor,Tensor> xla_grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_input = input.alias().ToTensor();
  auto _r_grid = grid.alias().ToTensor();
  auto&& __result = at::grid_sampler_3d_backward(_r_grad_output, _r_input, _r_grid, interpolation_mode, padding_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(grid)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(grid)));
}
static Tensor xla_hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction) {
  auto _r_self = self.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto&& __result = at::hinge_embedding_loss(_r_self, _r_target, margin, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_ger(const Tensor & self, const Tensor & vec2) {
  auto _r_self = self.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::ger(_r_self, _r_vec2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_ger_out(Tensor & result, const Tensor & self, const Tensor & vec2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_vec2 = vec2.alias().ToTensor();
  auto&& __result = at::ger_out(_w_result, _r_self, _r_vec2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static std::tuple<Tensor,Tensor> xla_gesv(const Tensor & self, const Tensor & A) {
  auto _r_self = self.alias().ToTensor();
  auto _r_A = A.alias().ToTensor();
  auto&& __result = at::gesv(_r_self, _r_A);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla_gesv_out(Tensor & solution, Tensor & lu, const Tensor & self, const Tensor & A) {
  auto _w_solution = solution.alias().ToMutableTensor();
  auto _w_lu = lu.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_A = A.alias().ToTensor();
  auto&& __result = at::gesv_out(_w_solution, _w_lu, _r_self, _r_A);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(solution, lu);
}
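// Multi-output _out wrappers unwrap every destination buffer mutably and
// return a tuple of references to the original XLA tensors, again
// without re-wrapping.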
static std::tuple<Tensor,Tensor> xla__gesv_helper(const Tensor & self, const Tensor & A) {
  auto _r_self = self.alias().ToTensor();
  auto _r_A = A.alias().ToTensor();
  auto&& __result = at::_gesv_helper(_r_self, _r_A);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)));
}
static Tensor xla_group_norm(const Tensor & input, int64_t num_groups, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enabled) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::group_norm(_r_input, num_groups, _r_weight, _r_bias, eps, cudnn_enabled);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_fft(const Tensor & self, int64_t signal_ndim, bool normalized) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::fft(_r_self, signal_ndim, normalized);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_ifft(const Tensor & self, int64_t signal_ndim, bool normalized) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::ifft(_r_self, signal_ndim, normalized);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_rfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::rfft(_r_self, signal_ndim, normalized, onesided);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntList signal_sizes) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::irfft(_r_self, signal_ndim, normalized, onesided, signal_sizes);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntList checked_signal_sizes, bool normalized, bool onesided, IntList output_sizes) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_fft_with_size(_r_self, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, output_sizes);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static void xla__cufft_set_plan_cache_max_size(int64_t max_size) {
  at::_cufft_set_plan_cache_max_size(max_size);
}
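// Ops with neither tensor arguments nor a return value are forwarded
// verbatim; there is nothing to unwrap or re-wrap.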
static Tensor xla_index(const Tensor & self, TensorList indices) {
  auto _r_self = self.alias().ToTensor();
  auto _l_indices = XlaCreateTensorList(indices);
  auto&& __result = at::index(_r_self, _l_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
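// TensorList arguments go through XlaCreateTensorList, which is assumed
// to unwrap each list element the same way the per-argument ToTensor()
// calls do elsewhere in this file.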
static Tensor & xla_index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_index = index.alias().ToTensor();
  auto _r_source = source.alias().ToTensor();
  auto&& __result = at::index_copy_(_w_self, dim, _r_index, _r_source);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate) {
  auto _r_self = self.alias().ToTensor();
  auto _l_indices = XlaCreateTensorList(indices);
  auto _r_values = values.alias().ToTensor();
  auto&& __result = at::index_put(_r_self, _l_indices, _r_values, accumulate);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_index_put_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _l_indices = XlaCreateTensorList(indices);
  auto _r_values = values.alias().ToTensor();
  auto&& __result = at::index_put_(_w_self, _l_indices, _r_values, accumulate);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_instance_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto _r_running_mean = running_mean.alias().ToTensor();
  auto _r_running_var = running_var.alias().ToTensor();
  auto&& __result = at::instance_norm(_r_input, _r_weight, _r_bias, _r_running_mean, _r_running_var, use_input_stats, momentum, eps, cudnn_enabled);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(running_var));
}
static Tensor xla_inverse(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::inverse(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_inverse_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::inverse_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla__inverse_helper(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_inverse_helper(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::isclose(_r_self, _r_other, rtol, atol, equal_nan);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_isnan(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::isnan(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static bool xla_is_distributed(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_distributed(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static bool xla_is_floating_point(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_floating_point(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static bool xla_is_complex(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_complex(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static bool xla_is_nonzero(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_nonzero(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static bool xla_is_same_size(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::is_same_size(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static bool xla_is_signed(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_signed(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
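// Predicates returning plain bool (and other non-tensor scalars) pass
// the at:: result straight through; only tensor values are re-wrapped.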
static Tensor xla_kl_div(const Tensor & self, const Tensor & target, int64_t reduction) {
  auto _r_self = self.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto&& __result = at::kl_div(_r_self, _r_target, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_target = target.alias().ToTensor();
  auto&& __result = at::kl_div_backward(_r_grad_output, _r_self, _r_target, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor,Tensor> xla_kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::kthvalue(_r_self, k, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self)));
}
static std::tuple<Tensor &,Tensor &> xla_kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  auto _w_values = values.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::kthvalue_out(_w_values, _w_indices, _r_self, k, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(values, indices);
}
static Tensor xla_layer_norm(const Tensor & input, IntList normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::layer_norm(_r_input, normalized_shape, _r_weight, _r_bias, eps, cudnn_enable);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_linear(const Tensor & input, const Tensor & weight, const Tensor & bias) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::linear(_r_input, _r_weight, _r_bias);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static Tensor xla_fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) {
  auto _r_input = input.alias().ToTensor();
  auto _r_weight = weight.alias().ToTensor();
  auto _r_packed = packed.alias().ToTensor();
  auto _r_col_offsets = col_offsets.alias().ToTensor();
  auto _r_bias = bias.alias().ToTensor();
  auto&& __result = at::fbgemm_linear_int8_weight(_r_input, _r_weight, _r_packed, _r_col_offsets, weight_scale, weight_zero_point, _r_bias);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(bias));
}
static std::tuple<Tensor,Tensor,double,int64_t> xla_fbgemm_linear_quantize_weight(const Tensor & input) {
  auto _r_input = input.alias().ToTensor();
  auto&& __result = at::fbgemm_linear_quantize_weight(_r_input);
  (void) __result; // Avoid warnings in case not used
  // NOTE: only the first two tuple elements are tensors; the trailing
  // double and int64_t are returned unchanged, since wrapping them in
  // CreateXlaTensor (as the generator emitted) would not type-check.
  return std::tuple<Tensor,Tensor,double,int64_t>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(input)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(input)), __result.get<2>(), __result.get<3>());
}
static Tensor xla_fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N) { | |
auto _r_input = input.alias().ToTensor(); | |
auto&& __result = at::fbgemm_pack_quantized_matrix(_r_input, K, N); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(input)); | |
} | |
static Tensor & xla_linspace_out(Tensor & result, Scalar start, Scalar end, int64_t steps) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::linspace_out(_w_result, start, end, steps); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_log(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_log_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::log_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_log_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_log10(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log10(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_log10_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::log10_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_log10_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log10_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_log1p(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log1p(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_log1p_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::log1p_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_log1p_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log1p_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_log2(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log2(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_log2_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::log2_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_log2_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log2_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_logdet(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::logdet(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_logspace_out(Tensor & result, Scalar start, Scalar end, int64_t steps) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::logspace_out(_w_result, start, end, steps); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_log_softmax(const Tensor & self, int64_t dim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log_softmax(_r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_log_softmax_1(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::log_softmax(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__log_softmax(const Tensor & self, int64_t dim, bool half_to_float) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_log_softmax(_r_self, dim, half_to_float); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_log_softmax_backward_data(_r_grad_output, _r_output, dim, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_logsumexp(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::logsumexp(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_logsumexp_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::logsumexp_out(_w_result, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) { | |
auto _r_input1 = input1.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::margin_ranking_loss(_r_input1, _r_input2, _r_target, margin, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(target)); | |
} | |
static Tensor xla_matmul(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::matmul(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_matmul_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::matmul_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_matrix_rank(const Tensor & self, double tol, bool symmetric) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::matrix_rank(_r_self, tol, symmetric); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_matrix_rank_1(const Tensor & self, bool symmetric) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::matrix_rank(_r_self, symmetric); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_matrix_power(const Tensor & self, int64_t n) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::matrix_power(_r_self, n); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor> xla_max(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_max_out(Tensor & max, Tensor & max_values, const Tensor & self, int64_t dim, bool keepdim) { | |
auto _w_max = max.alias().ToMutableTensor(); | |
auto _w_max_values = max_values.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_out(_w_max, _w_max_values, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(max, max_values); | |
} | |
static Tensor xla_max_values(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_values(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor> xla_max_pool1d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_pool1d_with_indices(_r_self, kernel_size, stride, padding, dilation, ceil_mode); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(__result.get<0>(), XlaTensorDevice(self)), CreateXlaTensor(__result.get<1>(), XlaTensorDevice(self))); | |
} | |
static Tensor xla_max_pool1d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_pool1d(_r_self, kernel_size, stride, padding, dilation, ceil_mode); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_max_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_pool2d(_r_self, kernel_size, stride, padding, dilation, ceil_mode); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_max_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max_pool3d(_r_self, kernel_size, stride, padding, dilation, ceil_mode); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mean(const Tensor & self, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean(_r_self, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mean_1(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mean_2(const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean(_r_self, dim, keepdim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mean_3(const Tensor & self, IntList dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mean_4(const Tensor & self, IntList dim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean(_r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
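// The numeric suffixes (xla_mean, xla_mean_1, ... xla_mean_4) appear to be
// the generator's way of giving each at::mean overload a distinct wrapper
// symbol; every variant forwards to at::mean with its own
// (dim, keepdim, dtype) signature.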
static Tensor & xla_mean_out(Tensor & result, const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean_out(_w_result, _r_self, dim, keepdim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_mean_out_1(Tensor & result, const Tensor & self, IntList dim, bool keepdim) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean_out(_w_result, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_mean_out_2(Tensor & result, const Tensor & self, IntList dim, ScalarType dtype) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mean_out(_w_result, _r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static std::tuple<Tensor,Tensor> xla_median(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::median(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) { | |
auto _w_values = values.alias().ToMutableTensor(); | |
auto _w_indices = indices.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::median_out(_w_values, _w_indices, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(values, indices); | |
} | |
static std::tuple<Tensor,Tensor> xla_min(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::min(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim) { | |
auto _w_min = min.alias().ToMutableTensor(); | |
auto _w_min_indices = min_indices.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::min_out(_w_min, _w_min_indices, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(min, min_indices); | |
} | |
static Tensor xla_min_values(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::min_values(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mkldnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList stride, IntList dilation, int64_t groups) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::mkldnn_convolution(_r_self, _r_weight, _r_bias, padding, stride, dilation, groups); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mkldnn_convolution_backward_input(IntList self_size, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::mkldnn_convolution_backward_input(self_size, _r_grad_output, _r_weight, padding, stride, dilation, groups, bias_defined); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(weight)); | |
} | |
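// Backward wrappers with no `self` tensor argument (such as *_backward_input)
// infer output placement from another tensor, here `weight`; which reference
// tensor is used is baked in by the generator.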
static std::tuple<Tensor,Tensor> xla_mkldnn_convolution_backward_weights(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool bias_defined) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mkldnn_convolution_backward_weights(weight_size, _r_grad_output, _r_self, padding, stride, dilation, groups, bias_defined); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_mkldnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, std::array<bool,3> output_mask) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::mkldnn_convolution_backward(_r_self, _r_grad_output, _r_weight, padding, stride, dilation, groups, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_miopen_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto _r_running_mean = running_mean.alias().ToTensor(); | |
auto _r_running_var = running_var.alias().ToTensor(); | |
auto&& __result = at::miopen_batch_norm(_r_input, _r_weight, _r_bias, _r_running_mean, _r_running_var, training, exponential_average_factor, epsilon); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(running_var)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(running_var)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(running_var)));
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_running_mean = running_mean.alias().ToTensor(); | |
auto _r_running_var = running_var.alias().ToTensor(); | |
auto _r_save_mean = save_mean.alias().ToTensor(); | |
auto _r_save_var = save_var.alias().ToTensor(); | |
auto&& __result = at::miopen_batch_norm_backward(_r_input, _r_grad_output, _r_weight, _r_running_mean, _r_running_var, _r_save_mean, _r_save_var, epsilon); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(save_var)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(save_var)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(save_var)));
} | |
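// The batch-norm wrappers place all three results on the device of a stats
// tensor (running_var above, save_var here) rather than on the input's
// device.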
static Tensor xla_miopen_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution(_r_self, _r_weight, _r_bias, padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_miopen_convolution_backward_input(IntList self_size, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_backward_input(self_size, _r_grad_output, _r_weight, padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(weight)); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_miopen_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_backward(_r_self, _r_grad_output, _r_weight, padding, stride, dilation, groups, benchmark, deterministic, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
} | |
static Tensor xla_miopen_convolution_backward_bias(const Tensor & grad_output) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_backward_bias(_r_grad_output); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(grad_output)); | |
} | |
static Tensor xla_miopen_convolution_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_backward_weight(weight_size, _r_grad_output, _r_self, padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_transpose(_r_self, _r_weight, _r_bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_miopen_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_transpose_backward(_r_self, _r_grad_output, _r_weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
} | |
static Tensor xla_miopen_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_transpose_backward_input(_r_grad_output, _r_weight, padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(weight)); | |
} | |
static Tensor xla_miopen_convolution_transpose_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::miopen_convolution_transpose_backward_weight(weight_size, _r_grad_output, _r_self, padding, stride, dilation, groups, benchmark, deterministic); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_mm(const Tensor & self, const Tensor & mat2) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mat2 = mat2.alias().ToTensor(); | |
auto&& __result = at::mm(_r_self, _r_mat2); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mm_out(Tensor & result, const Tensor & self, const Tensor & mat2) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mat2 = mat2.alias().ToTensor(); | |
auto&& __result = at::mm_out(_w_result, _r_self, _r_mat2); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla__sparse_mm(const Tensor & sparse, const Tensor & dense) { | |
auto _r_sparse = sparse.alias().ToTensor(); | |
auto _r_dense = dense.alias().ToTensor(); | |
auto&& __result = at::_sparse_mm(_r_sparse, _r_dense); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(dense)); | |
} | |
static std::tuple<Tensor,Tensor> xla_mode(const Tensor & self, int64_t dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mode(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) { | |
auto _w_values = values.alias().ToMutableTensor(); | |
auto _w_indices = indices.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mode_out(_w_values, _w_indices, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(values, indices); | |
} | |
static Tensor xla_mul(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::mul(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mul_(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::mul_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
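// In-place variants (trailing underscore) mutate `self` through the mutable
// alias and return the original reference, matching ATen's in-place
// convention.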
static Tensor & xla_mul_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::mul_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_mul_1(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mul(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mul__1(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::mul_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_mv(const Tensor & self, const Tensor & vec) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_vec = vec.alias().ToTensor(); | |
auto&& __result = at::mv(_r_self, _r_vec); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mv_out(Tensor & result, const Tensor & self, const Tensor & vec) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_vec = vec.alias().ToTensor(); | |
auto&& __result = at::mv_out(_w_result, _r_self, _r_vec); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_mvlgamma(const Tensor & self, int64_t p) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::mvlgamma(_r_self, p); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mvlgamma_(Tensor & self, int64_t p) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::mvlgamma_(_w_self, p); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::narrow_copy(_r_self, dim, start, length); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::narrow(_r_self, dim, start, length); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto _r_running_mean = running_mean.alias().ToTensor(); | |
auto _r_running_var = running_var.alias().ToTensor(); | |
auto&& __result = at::native_batch_norm(_r_input, _r_weight, _r_bias, _r_running_mean, _r_running_var, training, momentum, eps); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(running_var)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(running_var)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(running_var)));
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_invstd, bool train, double eps, std::array<bool,3> output_mask) { | |
auto _r_grad_out = grad_out.alias().ToTensor(); | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_running_mean = running_mean.alias().ToTensor(); | |
auto _r_running_var = running_var.alias().ToTensor(); | |
auto _r_save_mean = save_mean.alias().ToTensor(); | |
auto _r_save_invstd = save_invstd.alias().ToTensor(); | |
auto&& __result = at::native_batch_norm_backward(_r_grad_out, _r_input, _r_weight, _r_running_mean, _r_running_var, _r_save_mean, _r_save_invstd, train, eps, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(save_invstd)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(save_invstd)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(save_invstd)));
} | |
static std::tuple<Tensor,Tensor> xla_batch_norm_update_stats(const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_running_mean = running_mean.alias().ToTensor(); | |
auto _r_running_var = running_var.alias().ToTensor(); | |
auto&& __result = at::batch_norm_update_stats(_r_input, _r_running_mean, _r_running_var, momentum); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(running_var)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(running_var)));
} | |
static Tensor & xla_ones_out(Tensor & result, IntList size) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::ones_out(_w_result, size); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
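// Factory-style _out functions (ones/rand/randint/randn/randperm/range) take
// no tensor inputs other than the destination, so no device inference is
// involved; they simply fill the preallocated `result`.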
static Tensor xla_ones_like(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::ones_like(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim) { | |
auto _r_x1 = x1.alias().ToTensor(); | |
auto _r_x2 = x2.alias().ToTensor(); | |
auto&& __result = at::pairwise_distance(_r_x1, _r_x2, p, eps, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(x2)); | |
} | |
static Tensor xla_pdist(const Tensor & self, double p) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pdist(_r_self, p); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__pdist_forward(const Tensor & self, double p) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_pdist_forward(_r_self, p); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist) { | |
auto _r_grad = grad.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_pdist = pdist.alias().ToTensor(); | |
auto&& __result = at::_pdist_backward(_r_grad, _r_self, p, _r_pdist); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps) { | |
auto _r_x1 = x1.alias().ToTensor(); | |
auto _r_x2 = x2.alias().ToTensor(); | |
auto&& __result = at::cosine_similarity(_r_x1, _r_x2, dim, eps); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(x2)); | |
} | |
static Tensor xla_permute(const Tensor & self, IntList dims) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::permute(_r_self, dims); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_pixel_shuffle(const Tensor & self, int64_t upscale_factor) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pixel_shuffle(_r_self, upscale_factor); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_pin_memory(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pin_memory(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_pinverse(const Tensor & self, double rcond) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pinverse(_r_self, rcond); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_rand_out(Tensor & result, IntList size) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::rand_out(_w_result, size); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_rand_out_1(Tensor & result, IntList size, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::rand_out(_w_result, size, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_rand_like(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::rand_like(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_randint_out(Tensor & result, int64_t high, IntList size) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randint_out(_w_result, high, size); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_randint_out_1(Tensor & result, int64_t high, IntList size, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randint_out(_w_result, high, size, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_randint_out_2(Tensor & result, int64_t low, int64_t high, IntList size) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randint_out(_w_result, low, high, size); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_randint_out_3(Tensor & result, int64_t low, int64_t high, IntList size, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randint_out(_w_result, low, high, size, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_randint_like(const Tensor & self, int64_t high) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::randint_like(_r_self, high); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_randint_like_1(const Tensor & self, int64_t low, int64_t high) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::randint_like(_r_self, low, high); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_randn_out(Tensor & result, IntList size) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randn_out(_w_result, size); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_randn_out_1(Tensor & result, IntList size, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randn_out(_w_result, size, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_randn_like(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::randn_like(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_randperm_out(Tensor & result, int64_t n) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randperm_out(_w_result, n); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_randperm_out_1(Tensor & result, int64_t n, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::randperm_out(_w_result, n, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_range_out(Tensor & result, Scalar start, Scalar end, Scalar step) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto&& __result = at::range_out(_w_result, start, end, step); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_repeat(const Tensor & self, IntList repeats) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::repeat(_r_self, repeats); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_reshape(const Tensor & self, IntList shape) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::reshape(_r_self, shape); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_reshape_as(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::reshape_as(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor> xla_RoiPooling2d_forward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_rois = rois.alias().ToTensor(); | |
auto&& __result = at::RoiPooling2d_forward(_r_input, _r_rois, pooledHeight, pooledWidth, spatialScale); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(rois)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(rois)));
} | |
static Tensor xla_RoiPooling2d_backward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const Tensor & gradOutput, const Tensor & argmaxes) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_rois = rois.alias().ToTensor(); | |
auto _r_gradOutput = gradOutput.alias().ToTensor(); | |
auto _r_argmaxes = argmaxes.alias().ToTensor(); | |
auto&& __result = at::RoiPooling2d_backward(_r_input, _r_rois, pooledHeight, pooledWidth, spatialScale, _r_gradOutput, _r_argmaxes); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(argmaxes)); | |
} | |
static Tensor xla_round(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::round(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_round_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::round_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_round_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::round_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::rrelu(_r_self, lower, upper, training, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::rrelu_(_w_self, lower, upper, training, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_relu(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::relu(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_relu_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::relu_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_prelu(const Tensor & self, const Tensor & weight) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::prelu(_r_self, _r_weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor> xla_prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::prelu_backward(_r_grad_output, _r_self, _r_weight); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static Tensor xla_hardshrink(const Tensor & self, Scalar lambd) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardshrink(_r_self, lambd); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_hardshrink_backward(const Tensor & grad_out, const Tensor & self, Scalar lambd) { | |
auto _r_grad_out = grad_out.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardshrink_backward(_r_grad_out, _r_self, lambd); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_rsqrt(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::rsqrt(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_rsqrt_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::rsqrt_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_rsqrt_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::rsqrt_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_select(const Tensor & self, int64_t dim, int64_t index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::select(_r_self, dim, index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_selu(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::selu(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_selu_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::selu_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_celu(const Tensor & self, Scalar alpha) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::celu(_r_self, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_celu_(Tensor & self, Scalar alpha) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::celu_(_w_self, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_sigmoid(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sigmoid(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sigmoid_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::sigmoid_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_sigmoid_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sigmoid_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_sin(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sin(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sin_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::sin_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_sin_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sin_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_sinh(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sinh(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sinh_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::sinh_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_sinh_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sinh_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_detach(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::detach(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_detach_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::detach_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static int64_t xla_size(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::size(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return __result; | |
} | |
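// Wrappers returning plain scalars (size, stride) need no re-wrapping and
// pass the ATen result straight through.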
static Tensor xla_slice(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::slice(_r_self, dim, start, end, step); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor,Tensor> xla_slogdet(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::slogdet(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static Tensor xla_smm(const Tensor & self, const Tensor & mat2) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mat2 = mat2.alias().ToTensor(); | |
auto&& __result = at::smm(_r_self, _r_mat2); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_softmax(const Tensor & self, int64_t dim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::softmax(_r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_softmax_1(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::softmax(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__softmax(const Tensor & self, int64_t dim, bool half_to_float) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_softmax(_r_self, dim, half_to_float); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_softmax_backward_data(_r_grad_output, _r_output, dim, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__sparse_add_out(Tensor & result, const Tensor & self, const Tensor & other, Scalar alpha) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::_sparse_add_out(_w_result, _r_self, _r_other, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_dense_add_out(Tensor & result, const Tensor & self, SparseTensorRef other, Scalar alpha) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_sparse_dense_add_out(_w_result, _r_self, other, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_div_zerodim_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::_sparse_div_zerodim_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_div_scalar_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_sparse_div_scalar_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_mul_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::_sparse_mul_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_mul_zerodim_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::_sparse_mul_zerodim_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla__sparse_mul_scalar_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_sparse_mul_scalar_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static std::vector<Tensor> xla_split(const Tensor & self, int64_t split_size, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::split(_r_self, split_size, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensors(__result); | |
} | |
static std::vector<Tensor> xla_split_with_sizes(const Tensor & self, IntList split_sizes, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::split_with_sizes(_r_self, split_sizes, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensors(__result); | |
} | |
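// Operations producing std::vector<Tensor> (split, split_with_sizes) are
// re-wrapped in bulk via CreateXlaTensors instead of element by element.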
static Tensor xla_squeeze(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::squeeze(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_squeeze_1(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::squeeze(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_squeeze_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::squeeze_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_squeeze__1(Tensor & self, int64_t dim) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::squeeze_(_w_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mat1 = mat1.alias().ToTensor(); | |
auto _r_mat2 = mat2.alias().ToTensor(); | |
auto&& __result = at::sspaddmm(_r_self, _r_mat1, _r_mat2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sspaddmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mat1 = mat1.alias().ToTensor(); | |
auto _r_mat2 = mat2.alias().ToTensor(); | |
auto&& __result = at::sspaddmm_out(_w_result, _r_self, _r_mat1, _r_mat2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_stack(TensorList tensors, int64_t dim) { | |
auto _l_tensors = XlaCreateTensorList(tensors); | |
auto&& __result = at::stack(_l_tensors, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(tensors)); | |
} | |
static Tensor & xla_stack_out(Tensor & result, TensorList tensors, int64_t dim) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _l_tensors = XlaCreateTensorList(tensors); | |
auto&& __result = at::stack_out(_w_result, _l_tensors, dim); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
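// TensorList arguments are converted with XlaCreateTensorList before the
// at:: call; for stack, the result device is derived from the list itself
// via XlaTensorDevice(tensors).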
static Tensor xla_stft(const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const Tensor & window, bool normalized, bool onesided) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_window = window.alias().ToTensor(); | |
auto&& __result = at::stft(_r_self, n_fft, hop_length, win_length, _r_window, normalized, onesided); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static int64_t xla_stride(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::stride(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return __result; | |
} | |
static Tensor xla_sum(const Tensor & self, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum(_r_self, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_sum_1(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_sum_2(const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum(_r_self, dim, keepdim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_sum_3(const Tensor & self, IntList dim, bool keepdim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum(_r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_sum_4(const Tensor & self, IntList dim, ScalarType dtype) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum(_r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sum_out(Tensor & result, const Tensor & self, IntList dim, bool keepdim, ScalarType dtype) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum_out(_w_result, _r_self, dim, keepdim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_sum_out_1(Tensor & result, const Tensor & self, IntList dim, bool keepdim) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum_out(_w_result, _r_self, dim, keepdim); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor & xla_sum_out_2(Tensor & result, const Tensor & self, IntList dim, ScalarType dtype) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum_out(_w_result, _r_self, dim, dtype); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_sum_to_size(const Tensor & self, IntList size) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sum_to_size(_r_self, size); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_sqrt(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sqrt(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sqrt_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::sqrt_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_sqrt_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::sqrt_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_std(const Tensor & self, bool unbiased) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::std(_r_self, unbiased);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_std_1(const Tensor & self, IntList dim, bool unbiased, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::std(_r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_std_out(Tensor & result, const Tensor & self, IntList dim, bool unbiased, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::std_out(_w_result, _r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_prod(const Tensor & self, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod(_r_self, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_prod_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_prod_2(const Tensor & self, int64_t dim, bool keepdim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod(_r_self, dim, keepdim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_prod_3(const Tensor & self, int64_t dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_prod_4(const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod(_r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_prod_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim, ScalarType dtype) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod_out(_w_result, _r_self, dim, keepdim, dtype);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_prod_out_1(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod_out(_w_result, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_prod_out_2(Tensor & result, const Tensor & self, int64_t dim, ScalarType dtype) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::prod_out(_w_result, _r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_t(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::t(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_t_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::t_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_tan(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::tan(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_tan_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::tan_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_tan_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::tan_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_tanh(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::tanh(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_tanh_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::tanh_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_tanh_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::tanh_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_tensordot(const Tensor & self, const Tensor & other, IntList dims_self, IntList dims_other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::tensordot(_r_self, _r_other, dims_self, dims_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_threshold(const Tensor & self, Scalar threshold, Scalar value) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::threshold(_r_self, threshold, value);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_threshold_(Tensor & self, Scalar threshold, Scalar value) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::threshold_(_w_self, threshold, value);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_threshold_out(Tensor & result, const Tensor & self, Scalar threshold, Scalar value) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::threshold_out(_w_result, _r_self, threshold, value);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::threshold_backward(_r_grad_output, _r_self, threshold);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_transpose(const Tensor & self, int64_t dim0, int64_t dim1) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::transpose(_r_self, dim0, dim1);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_transpose_(Tensor & self, int64_t dim0, int64_t dim1) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::transpose_(_w_self, dim0, dim1);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_one_hot(const Tensor & self, int64_t num_classes) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::one_hot(_r_self, num_classes);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_flip(const Tensor & self, IntList dims) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::flip(_r_self, dims);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_roll(const Tensor & self, IntList shifts, IntList dims) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::roll(_r_self, shifts, dims);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_rot90(const Tensor & self, int64_t k, IntList dims) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::rot90(_r_self, k, dims);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntList expand1, IntList expand2, IntList expand3, IntList sumdim, int64_t unroll_dim) {
  auto _r_i1 = i1.alias().ToTensor();
  auto _r_i2 = i2.alias().ToTensor();
  auto _r_i3 = i3.alias().ToTensor();
  auto&& __result = at::_trilinear(_r_i1, _r_i2, _r_i3, expand1, expand2, expand3, sumdim, unroll_dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(i3));
}
static Tensor xla_triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
  auto _r_anchor = anchor.alias().ToTensor();
  auto _r_positive = positive.alias().ToTensor();
  auto _r_negative = negative.alias().ToTensor();
  auto&& __result = at::triplet_margin_loss(_r_anchor, _r_positive, _r_negative, margin, p, eps, swap, reduction);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(negative));
}
static Tensor xla_trunc(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::trunc(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_trunc_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::trunc_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_trunc_out(Tensor & result, const Tensor & self) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::trunc_out(_w_result, _r_self);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_type_as(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::type_as(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor,Tensor> xla__unique(const Tensor & self, bool sorted, bool return_inverse) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_unique(_r_self, sorted, return_inverse);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static std::tuple<Tensor,Tensor> xla__unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_unique_dim(_r_self, dim, sorted, return_inverse);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
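// Multi-result ops unpack the ATen std::tuple with std::get<N> and wrap each
// element separately, all on the device of the same reference argument (here
// `self`). A hedged sketch of the call site, assuming an XLA tensor `t`
// (illustrative only):
//   Tensor values, inverse;
//   std::tie(values, inverse) =
//       xla__unique(t, /*sorted=*/true, /*return_inverse=*/true);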
static Tensor xla__unsafe_view(const Tensor & self, IntList size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_unsafe_view(_r_self, size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_unsqueeze(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::unsqueeze(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_unsqueeze_(Tensor & self, int64_t dim) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::unsqueeze_(_w_self, dim);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_var(const Tensor & self, bool unbiased) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::var(_r_self, unbiased);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_var_1(const Tensor & self, IntList dim, bool unbiased, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::var(_r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_var_out(Tensor & result, const Tensor & self, IntList dim, bool unbiased, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::var_out(_w_result, _r_self, dim, unbiased, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_view_as(const Tensor & self, const Tensor & other) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::view_as(_r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_where(const Tensor & condition, const Tensor & self, const Tensor & other) {
  auto _r_condition = condition.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::where(_r_condition, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__s_where(const Tensor & condition, const Tensor & self, const Tensor & other) {
  auto _r_condition = condition.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::_s_where(_r_condition, _r_self, _r_other);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_norm_except_dim(const Tensor & v, int64_t pow, int64_t dim) {
  auto _r_v = v.alias().ToTensor();
  auto&& __result = at::norm_except_dim(_r_v, pow, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(v));
}
static Tensor xla__weight_norm(const Tensor & v, const Tensor & g, int64_t dim) {
  auto _r_v = v.alias().ToTensor();
  auto _r_g = g.alias().ToTensor();
  auto&& __result = at::_weight_norm(_r_v, _r_g, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(g));
}
static std::tuple<Tensor,Tensor> xla__weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) {
  auto _r_v = v.alias().ToTensor();
  auto _r_g = g.alias().ToTensor();
  auto&& __result = at::_weight_norm_cuda_interface(_r_v, _r_g, dim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(g)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(g)));
}
static std::tuple<Tensor,Tensor> xla__weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) {
  auto _r_grad_w = grad_w.alias().ToTensor();
  auto _r_saved_v = saved_v.alias().ToTensor();
  auto _r_saved_g = saved_g.alias().ToTensor();
  auto _r_saved_norms = saved_norms.alias().ToTensor();
  auto&& __result = at::_weight_norm_cuda_interface_backward(_r_grad_w, _r_saved_v, _r_saved_g, _r_saved_norms, dim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(saved_norms)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(saved_norms)));
}
static std::tuple<Tensor,Tensor> xla__weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) {
  auto _r_grad_w = grad_w.alias().ToTensor();
  auto _r_saved_v = saved_v.alias().ToTensor();
  auto _r_saved_g = saved_g.alias().ToTensor();
  auto _r_saved_norms = saved_norms.alias().ToTensor();
  auto&& __result = at::_weight_norm_differentiable_backward(_r_grad_w, _r_saved_v, _r_saved_g, _r_saved_norms, dim);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(saved_norms)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(saved_norms)));
}
static Tensor & xla_zeros_out(Tensor & result, IntList size) {
  auto _w_result = result.alias().ToMutableTensor();
  auto&& __result = at::zeros_out(_w_result, size);
  (void) __result; // Avoid warnings in case not used
  return result;
}
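// zeros_out has no input tensor at all; the destination `result` is the only
// tensor operand, so no device inference is required and the wrapper simply
// returns the XLA `result` reference.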
static Tensor xla_zeros_like(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::zeros_like(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__standard_gamma_grad(const Tensor & self, const Tensor & output) {
  auto _r_self = self.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::_standard_gamma_grad(_r_self, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__standard_gamma(const Tensor & self, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_standard_gamma(_r_self, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_poisson(const Tensor & self, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::poisson(_r_self, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_native_norm(const Tensor & self, Scalar p) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::native_norm(_r_self, p);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__sparse_sum(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_sparse_sum(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__sparse_sum_1(const Tensor & self, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_sparse_sum(_r_self, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__sparse_sum_2(const Tensor & self, IntList dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_sparse_sum(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__sparse_sum_3(const Tensor & self, IntList dim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_sparse_sum(_r_self, dim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__sparse_sum_backward(const Tensor & grad, const Tensor & self, IntList dim) {
  auto _r_grad = grad.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_sparse_sum_backward(_r_grad, _r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_norm(const Tensor & self, c10::optional<Scalar> p, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm(_r_self, p, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_norm_1(const Tensor & self, Scalar p) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm(_r_self, p);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_norm_2(const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm(_r_self, p, dim, keepdim, dtype);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_norm_3(const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm(_r_self, p, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_norm_out(Tensor & result, const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim, ScalarType dtype) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm_out(_w_result, _r_self, p, dim, keepdim, dtype);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor & xla_norm_out_1(Tensor & result, const Tensor & self, c10::optional<Scalar> p, IntList dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::norm_out(_w_result, _r_self, p, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_frobenius_norm(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::frobenius_norm(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_frobenius_norm_1(const Tensor & self, IntList dim, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::frobenius_norm(_r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_frobenius_norm_out(Tensor & result, const Tensor & self, IntList dim, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::frobenius_norm_out(_w_result, _r_self, dim, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_nuclear_norm(const Tensor & self, bool keepdim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::nuclear_norm(_r_self, keepdim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_nuclear_norm_out(Tensor & result, const Tensor & self, bool keepdim) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::nuclear_norm_out(_w_result, _r_self, keepdim);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_native_clone(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::native_clone(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_clone(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::clone(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_native_resize_as_(Tensor & self, const Tensor & the_template) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_the_template = the_template.alias().ToTensor();
  auto&& __result = at::native_resize_as_(_w_self, _r_the_template);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_resize_as_(Tensor & self, const Tensor & the_template) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_the_template = the_template.alias().ToTensor();
  auto&& __result = at::resize_as_(_w_self, _r_the_template);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_native_pow_out(Tensor & result, const Tensor & self, Scalar exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::native_pow_out(_w_result, _r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_native_pow(const Tensor & self, Scalar exponent) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::native_pow(_r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_pow_out(Tensor & result, const Tensor & self, Scalar exponent) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::pow_out(_w_result, _r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_pow(const Tensor & self, Scalar exponent) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::pow(_r_self, exponent);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_native_zero_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::native_zero_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_zero_(Tensor & self) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::zero_(_w_self);
  (void) __result; // Avoid warnings in case not used
  return self;
}
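// Several ops appear in pairs: one wrapper for the at::native_* entry point
// and one for the public at::* dispatcher (native_clone/clone,
// native_resize_as_/resize_as_, native_pow/pow, native_zero_/zero_ above),
// each forwarding to its own ATen counterpart.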
static Tensor & xla_sub_out(Tensor & result, const Tensor & self, const Tensor & other, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::sub_out(_w_result, _r_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_sub(const Tensor & self, const Tensor & other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::sub(_r_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_sub_(Tensor & self, const Tensor & other, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::sub_(_w_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_sub_1(const Tensor & self, Scalar other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::sub(_r_self, other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_sub__1(Tensor & self, Scalar other, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::sub_(_w_self, other, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_rsub(const Tensor & self, const Tensor & other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::rsub(_r_self, _r_other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_rsub_1(const Tensor & self, Scalar other, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::rsub(_r_self, other, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_s_native_addmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::s_native_addmm_out(_w_result, _r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_s_native_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::s_native_addmm(_r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_s_native_addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::s_native_addmm_(_w_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla__sparse_addmm(const Tensor & self, const Tensor & sparse, const Tensor & dense, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_sparse = sparse.alias().ToTensor();
  auto _r_dense = dense.alias().ToTensor();
  auto&& __result = at::_sparse_addmm(_r_self, _r_sparse, _r_dense, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_addmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::addmm_out(_w_result, _r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _r_self = self.alias().ToTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::addmm(_r_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::addmm_(_w_self, _r_mat1, _r_mat2, beta, alpha);
  (void) __result; // Avoid warnings in case not used
  return self;
}
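// The sparse factory wrappers below derive the CreateXlaTensor device from
// one of their parameters. _sparse_coo_tensor_with_dims has no Tensor inputs,
// so the generated code passes its first argument, the int64_t sparse_dim, to
// XlaTensorDevice; the _and_tensors variant keys off the `values` tensor
// instead.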
static Tensor xla__sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntList size, const TensorOptions & options) {
  auto&& __result = at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, options);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(sparse_dim));
}
static Tensor xla__sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntList size, const Tensor & indices, const Tensor & values, const TensorOptions & options) {
  auto _r_indices = indices.alias().ToTensor();
  auto _r_values = values.alias().ToTensor();
  auto&& __result = at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, _r_indices, _r_values, options);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(values));
}
static Tensor & xla_sparse_resize_(Tensor & self, IntList size, int64_t sparse_dim, int64_t dense_dim) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::sparse_resize_(_w_self, size, sparse_dim, dense_dim);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_sparse_resize_and_clear_(Tensor & self, IntList size, int64_t sparse_dim, int64_t dense_dim) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::sparse_resize_and_clear_(_w_self, size, sparse_dim, dense_dim);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_sparse_mask(const Tensor & self, SparseTensorRef mask) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::sparse_mask(_r_self, mask);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to_dense(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to_dense(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static int64_t xla_sparse_dim(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::sparse_dim(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
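// Wrappers that return plain C++ values (int64_t, bool, Scalar) pass the ATen
// result straight through: only the argument unwrapping is needed, as there
// is no tensor result to re-wrap.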
static int64_t xla__dimI(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_dimI(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static int64_t xla_dense_dim(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::dense_dim(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static int64_t xla__dimV(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_dimV(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static int64_t xla__nnz(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_nnz(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Tensor xla_coalesce(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::coalesce(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static bool xla_is_coalesced(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::is_coalesced(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Tensor xla__indices(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_indices(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla__values(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_values(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla__coalesced_(Tensor & self, bool coalesced) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::_coalesced_(_w_self, coalesced);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor xla_indices(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::indices(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_values(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::values(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_hspmm_out(Tensor & result, const Tensor & mat1, const Tensor & mat2) {
  auto _w_result = result.alias().ToMutableTensor();
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::hspmm_out(_w_result, _r_mat1, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return result;
}
static Tensor xla_hspmm(const Tensor & mat1, const Tensor & mat2) {
  auto _r_mat1 = mat1.alias().ToTensor();
  auto _r_mat2 = mat2.alias().ToTensor();
  auto&& __result = at::hspmm(_r_mat1, _r_mat2);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(mat2));
}
static Tensor & xla_copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_src = src.alias().ToTensor();
  auto&& __result = at::copy_sparse_to_sparse_(_w_self, _r_src, non_blocking);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static int64_t xla_numel(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::numel(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static std::vector<Tensor> xla_unbind(const Tensor & self, int64_t dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::unbind(_r_self, dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensors(__result);
}
static Tensor xla_to_sparse(const Tensor & self, int64_t sparse_dim) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to_sparse(_r_self, sparse_dim);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to_sparse_1(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to_sparse(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to(const Tensor & self, const TensorOptions & options, bool non_blocking, bool copy) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to(_r_self, options, non_blocking, copy);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to_1(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to(_r_self, device, dtype, non_blocking, copy);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to_2(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::to(_r_self, dtype, non_blocking, copy);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor xla_to_3(const Tensor & self, const Tensor & other, bool non_blocking, bool copy) {
  auto _r_self = self.alias().ToTensor();
  auto _r_other = other.alias().ToTensor();
  auto&& __result = at::to(_r_self, _r_other, non_blocking, copy);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::vector<Tensor> xla_meshgrid(TensorList tensors) {
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::meshgrid(_l_tensors);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensors(__result);
}
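// TensorList arguments are converted wholesale with XlaCreateTensorList, and
// std::vector<Tensor> results are re-wrapped with CreateXlaTensors, which
// presumably applies the per-element conversion used elsewhere in this file.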
static Tensor xla_cartesian_prod(TensorList tensors) {
  auto _l_tensors = XlaCreateTensorList(tensors);
  auto&& __result = at::cartesian_prod(_l_tensors);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(tensors));
}
static Tensor xla_combinations(const Tensor & self, int64_t r, bool with_replacement) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::combinations(_r_self, r, with_replacement);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Scalar xla_item(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::item(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
static Scalar xla__local_scalar_dense(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::_local_scalar_dense(_r_self);
  (void) __result; // Avoid warnings in case not used
  return __result;
}
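// item() and _local_scalar_dense() return a host-side Scalar rather than a
// wrapped tensor; for a lazy XLA tensor this is the sort of call that would
// force pending computation to materialize on the host (an assumption about
// the runtime, not something visible in this generated file).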
static std::tuple<Tensor,Tensor,Tensor> xla__thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const Tensor & input_bias, const Tensor & hidden_bias) {
  auto _r_input_gates = input_gates.alias().ToTensor();
  auto _r_hidden_gates = hidden_gates.alias().ToTensor();
  auto _r_cx = cx.alias().ToTensor();
  auto _r_input_bias = input_bias.alias().ToTensor();
  auto _r_hidden_bias = hidden_bias.alias().ToTensor();
  auto&& __result = at::_thnn_fused_lstm_cell(_r_input_gates, _r_hidden_gates, _r_cx, _r_input_bias, _r_hidden_bias);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hidden_bias)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hidden_bias)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(hidden_bias)));
}
static std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> xla__thnn_fused_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias) {
  auto _r_grad_hy = grad_hy.alias().ToTensor();
  auto _r_grad_cy = grad_cy.alias().ToTensor();
  auto _r_cx = cx.alias().ToTensor();
  auto _r_cy = cy.alias().ToTensor();
  auto _r_workspace = workspace.alias().ToTensor();
  auto&& __result = at::_thnn_fused_lstm_cell_backward(_r_grad_hy, _r_grad_cy, _r_cx, _r_cy, _r_workspace, has_bias);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<3>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<4>(__result), XlaTensorDevice(workspace)));
}
static std::tuple<Tensor,Tensor> xla__thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias) {
  auto _r_input_gates = input_gates.alias().ToTensor();
  auto _r_hidden_gates = hidden_gates.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_input_bias = input_bias.alias().ToTensor();
  auto _r_hidden_bias = hidden_bias.alias().ToTensor();
  auto&& __result = at::_thnn_fused_gru_cell(_r_input_gates, _r_hidden_gates, _r_hx, _r_input_bias, _r_hidden_bias);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hidden_bias)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hidden_bias)));
}
static std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> xla__thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias) {
  auto _r_grad_hy = grad_hy.alias().ToTensor();
  auto _r_workspace = workspace.alias().ToTensor();
  auto&& __result = at::_thnn_fused_gru_cell_backward(_r_grad_hy, _r_workspace, has_bias);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<3>(__result), XlaTensorDevice(workspace)), CreateXlaTensor(std::get<4>(__result), XlaTensorDevice(workspace)));
}
static std::tuple<Tensor,Tensor,Tensor> xla_lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  auto _r_input = input.alias().ToTensor();
  auto _l_hx = XlaCreateTensorList(hx);
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::lstm(_r_input, _l_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(input)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(input)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(input)));
}
static std::tuple<Tensor,Tensor,Tensor> xla_lstm_1(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  auto _r_data = data.alias().ToTensor();
  auto _r_batch_sizes = batch_sizes.alias().ToTensor();
  auto _l_hx = XlaCreateTensorList(hx);
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::lstm(_r_data, _r_batch_sizes, _l_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(batch_sizes)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(batch_sizes)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(batch_sizes)));
}
static std::tuple<Tensor,Tensor> xla_gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::gru(_r_input, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_gru_1(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  auto _r_data = data.alias().ToTensor();
  auto _r_batch_sizes = batch_sizes.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::gru(_r_data, _r_batch_sizes, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::rnn_tanh(_r_input, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_rnn_tanh_1(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  auto _r_data = data.alias().ToTensor();
  auto _r_batch_sizes = batch_sizes.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::rnn_tanh(_r_data, _r_batch_sizes, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::rnn_relu(_r_input, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_rnn_relu_1(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  auto _r_data = data.alias().ToTensor();
  auto _r_batch_sizes = batch_sizes.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::rnn_relu(_r_data, _r_batch_sizes, _r_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(hx)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(hx)));
}
static std::tuple<Tensor,Tensor> xla_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _l_hx = XlaCreateTensorList(hx);
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto&& __result = at::lstm_cell(_r_input, _l_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(b_hh)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(b_hh)));
}
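// The RNN cell wrappers key the output device off the last tensor parameter
// (b_hh here, col_offsets_hh for the quantized cells below), consistent with
// the generator's convention of picking one reference argument per op.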
static Tensor xla_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto&& __result = at::gru_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(b_hh));
}
static Tensor xla_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto&& __result = at::rnn_tanh_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(b_hh));
}
static Tensor xla_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto&& __result = at::rnn_relu_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(b_hh));
}
static std::tuple<Tensor,Tensor,Tensor> xla_quantized_lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  auto _r_input = input.alias().ToTensor();
  auto _l_hx = XlaCreateTensorList(hx);
  auto _l_params = XlaCreateTensorList(params);
  auto&& __result = at::quantized_lstm(_r_input, _l_hx, _l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(input)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(input)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(input)));
}
static std::tuple<Tensor,Tensor> xla_quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _l_hx = XlaCreateTensorList(hx);
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto _r_packed_ih = packed_ih.alias().ToTensor();
  auto _r_packed_hh = packed_hh.alias().ToTensor();
  auto _r_col_offsets_ih = col_offsets_ih.alias().ToTensor();
  auto _r_col_offsets_hh = col_offsets_hh.alias().ToTensor();
  auto&& __result = at::quantized_lstm_cell(_r_input, _l_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh, _r_packed_ih, _r_packed_hh, _r_col_offsets_ih, _r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(col_offsets_hh)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(col_offsets_hh)));
}
static Tensor xla_quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto _r_packed_ih = packed_ih.alias().ToTensor();
  auto _r_packed_hh = packed_hh.alias().ToTensor();
  auto _r_col_offsets_ih = col_offsets_ih.alias().ToTensor();
  auto _r_col_offsets_hh = col_offsets_hh.alias().ToTensor();
  auto&& __result = at::quantized_gru_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh, _r_packed_ih, _r_packed_hh, _r_col_offsets_ih, _r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(col_offsets_hh));
}
static Tensor xla_quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto _r_packed_ih = packed_ih.alias().ToTensor();
  auto _r_packed_hh = packed_hh.alias().ToTensor();
  auto _r_col_offsets_ih = col_offsets_ih.alias().ToTensor();
  auto _r_col_offsets_hh = col_offsets_hh.alias().ToTensor();
  auto&& __result = at::quantized_rnn_relu_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh, _r_packed_ih, _r_packed_hh, _r_col_offsets_ih, _r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(col_offsets_hh));
}
static Tensor xla_quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
  auto _r_input = input.alias().ToTensor();
  auto _r_hx = hx.alias().ToTensor();
  auto _r_w_ih = w_ih.alias().ToTensor();
  auto _r_w_hh = w_hh.alias().ToTensor();
  auto _r_b_ih = b_ih.alias().ToTensor();
  auto _r_b_hh = b_hh.alias().ToTensor();
  auto _r_packed_ih = packed_ih.alias().ToTensor();
  auto _r_packed_hh = packed_hh.alias().ToTensor();
  auto _r_col_offsets_ih = col_offsets_ih.alias().ToTensor();
  auto _r_col_offsets_hh = col_offsets_hh.alias().ToTensor();
  auto&& __result = at::quantized_rnn_tanh_cell(_r_input, _r_hx, _r_w_ih, _r_w_hh, _r_b_ih, _r_b_hh, _r_packed_ih, _r_packed_hh, _r_col_offsets_ih, _r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  (void) __result; // Avoid warnings in case not used
return CreateXlaTensor(__result, XlaTensorDevice(col_offsets_hh)); | |
} | |
static std::tuple<Tensor,Tensor> xla__pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first) { | |
auto _r_input = input.alias().ToTensor(); | |
auto _r_lengths = lengths.alias().ToTensor(); | |
auto&& __result = at::_pack_padded_sequence(_r_input, _r_lengths, batch_first); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(lengths)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(lengths)));
} | |
static Tensor xla__pack_padded_sequence_backward(const Tensor & grad, IntList input_size, const Tensor & batch_sizes, bool batch_first) { | |
auto _r_grad = grad.alias().ToTensor(); | |
auto _r_batch_sizes = batch_sizes.alias().ToTensor(); | |
auto&& __result = at::_pack_padded_sequence_backward(_r_grad, input_size, _r_batch_sizes, batch_first); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(batch_sizes)); | |
} | |
static std::tuple<Tensor,Tensor> xla__pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length) { | |
auto _r_data = data.alias().ToTensor(); | |
auto _r_batch_sizes = batch_sizes.alias().ToTensor(); | |
auto&& __result = at::_pad_packed_sequence(_r_data, _r_batch_sizes, batch_first, padding_value, total_length); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(batch_sizes)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(batch_sizes)));
} | |
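// Note: xla_data_ptr below returns the raw data pointer of the ATen alias
// created inside the wrapper; the pointer presumably stays valid only as
// long as the storage backing that alias does.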
static void* xla_data_ptr(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::data_ptr(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return __result; | |
} | |
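// In-place wrappers (set_, fill_, masked_*, and the *_ comparison and
// arithmetic ops below) all share one pattern: obtain a mutable ATen view of
// the XLA tensor via alias().ToMutableTensor(), run the in-place ATen op on
// it, deliberately ignore __result (which references the wrapper-local
// alias), and return the original XLA `self` so callers keep the XLA handle.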
static Tensor & xla_set_(Tensor & self, Storage source) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::set_(_w_self, source); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_set__1(Tensor & self, Storage source, int64_t storage_offset, IntList size, IntList stride) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::set_(_w_self, source, storage_offset, size, stride); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_set__2(Tensor & self, const Tensor & source) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_source = source.alias().ToTensor(); | |
auto&& __result = at::set_(_w_self, _r_source); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_set__3(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::set_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static bool xla_is_set_to(const Tensor & self, const Tensor & tensor) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor = tensor.alias().ToTensor(); | |
auto&& __result = at::is_set_to(_r_self, _r_tensor); | |
(void) __result; // Avoid warnings in case not used | |
return __result; | |
} | |
static Tensor & xla_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_mask = mask.alias().ToTensor(); | |
auto&& __result = at::masked_fill_(_w_self, _r_mask, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_masked_fill__1(Tensor & self, const Tensor & mask, const Tensor & value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_mask = mask.alias().ToTensor(); | |
auto _r_value = value.alias().ToTensor(); | |
auto&& __result = at::masked_fill_(_w_self, _r_mask, _r_value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_mask = mask.alias().ToTensor(); | |
auto _r_source = source.alias().ToTensor(); | |
auto&& __result = at::masked_scatter_(_w_self, _r_mask, _r_source); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla_view(const Tensor & self, IntList size) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::view(_r_self, size); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto _r_source = source.alias().ToTensor(); | |
auto&& __result = at::put_(_w_self, _r_index, _r_source, accumulate); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto _r_source = source.alias().ToTensor(); | |
auto&& __result = at::index_add_(_w_self, dim, _r_index, _r_source); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::index_fill_(_w_self, dim, _r_index, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_index_fill__1(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto _r_value = value.alias().ToTensor(); | |
auto&& __result = at::index_fill_(_w_self, dim, _r_index, _r_value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto _r_src = src.alias().ToTensor(); | |
auto&& __result = at::scatter_(_w_self, dim, _r_index, _r_src); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_scatter__1(Tensor & self, int64_t dim, const Tensor & index, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::scatter_(_w_self, dim, _r_index, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto _r_src = src.alias().ToTensor(); | |
auto&& __result = at::scatter_add_(_w_self, dim, _r_index, _r_src); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_lt_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::lt_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_lt__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::lt_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_gt_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::gt_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_gt__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::gt_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_le_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::le_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_le__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::le_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_ge_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::ge_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_ge__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ge_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_eq_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::eq_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_eq__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::eq_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_ne_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::ne_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_ne__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ne_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
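// Bitwise and shift operators: the out-of-place forms (__and__, __or__,
// __xor__, __lshift__, __rshift__) wrap the ATen result in a fresh XLA
// tensor on self's device, while the in-place __i*__ forms reuse the
// mutate-and-return-self pattern above.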
static Tensor xla___and__(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::__and__(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla___and___1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__and__(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla___iand__(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::__iand__(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla___iand___1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__iand__(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla___or__(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::__or__(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla___or___1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__or__(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla___ior__(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::__ior__(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla___ior___1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__ior__(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla___xor__(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::__xor__(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla___xor___1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__xor__(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla___ixor__(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::__ixor__(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla___ixor___1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__ixor__(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla___lshift__(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::__lshift__(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla___lshift___1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__lshift__(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla___ilshift__(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::__ilshift__(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla___ilshift___1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__ilshift__(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor xla___rshift__(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::__rshift__(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla___rshift___1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__rshift__(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla___irshift__(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::__irshift__(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla___irshift___1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::__irshift__(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_lgamma_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::lgamma_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_atan2_(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::atan2_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_tril_(Tensor & self, int64_t diagonal) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::tril_(_w_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_triu_(Tensor & self, int64_t diagonal) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::triu_(_w_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_digamma_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::digamma_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_polygamma_(Tensor & self, int64_t n) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::polygamma_(_w_self, n); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_erfinv_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::erfinv_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_frac_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::frac_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::renorm_(_w_self, p, dim, maxnorm); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_reciprocal_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::reciprocal_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_neg_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::neg_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_pow_(Tensor & self, Scalar exponent) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::pow_(_w_self, exponent); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_pow__1(Tensor & self, const Tensor & exponent) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_exponent = exponent.alias().ToTensor(); | |
auto&& __result = at::pow_(_w_self, _r_exponent); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_lerp_(Tensor & self, const Tensor & end, Scalar weight) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_end = end.alias().ToTensor(); | |
auto&& __result = at::lerp_(_w_self, _r_end, weight); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_sign_(Tensor & self) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::sign_(_w_self); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_fmod_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::fmod_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_fmod__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::fmod_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_remainder_(Tensor & self, Scalar other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::remainder_(_w_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_remainder__1(Tensor & self, const Tensor & other) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::remainder_(_w_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_batch1 = batch1.alias().ToTensor(); | |
auto _r_batch2 = batch2.alias().ToTensor(); | |
auto&& __result = at::addbmm_(_w_self, _r_batch1, _r_batch2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_addbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_batch1 = batch1.alias().ToTensor(); | |
auto _r_batch2 = batch2.alias().ToTensor(); | |
auto&& __result = at::addbmm_out(_w_result, _r_self, _r_batch1, _r_batch2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_batch1 = batch1.alias().ToTensor(); | |
auto _r_batch2 = batch2.alias().ToTensor(); | |
auto&& __result = at::addbmm(_r_self, _r_batch1, _r_batch2, beta, alpha); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcmul_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcdiv_(_w_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
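// Random-fill wrappers (random_, uniform_, normal_, cauchy_, ...) forward
// the Generator* to ATen unchanged, so sampling presumably happens entirely
// on the aliased ATen tensor before the mutation is observed through the
// returned XLA `self`.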
static Tensor & xla_random_(Tensor & self, int64_t from, int64_t to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::random_(_w_self, from, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_random__1(Tensor & self, int64_t to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::random_(_w_self, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_random__2(Tensor & self, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::random_(_w_self, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_uniform_(Tensor & self, double from, double to, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::uniform_(_w_self, from, to, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_normal_(Tensor & self, double mean, double std, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::normal_(_w_self, mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_cauchy_(Tensor & self, double median, double sigma, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::cauchy_(_w_self, median, sigma, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_log_normal_(Tensor & self, double mean, double std, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::log_normal_(_w_self, mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_exponential_(Tensor & self, double lambd, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::exponential_(_w_self, lambd, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_geometric_(Tensor & self, double p, Generator * generator) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::geometric_(_w_self, p, generator); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_diag_out(Tensor & result, const Tensor & self, int64_t diagonal) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::diag_out(_w_result, _r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_diag(const Tensor & self, int64_t diagonal) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::diag(_r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_cross_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::cross_out(_w_result, _r_self, _r_other, dim); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_cross(const Tensor & self, const Tensor & other, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::cross(_r_self, _r_other, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_triu_out(Tensor & result, const Tensor & self, int64_t diagonal) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::triu_out(_w_result, _r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_triu(const Tensor & self, int64_t diagonal) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::triu(_r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_tril_out(Tensor & result, const Tensor & self, int64_t diagonal) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::tril_out(_w_result, _r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_tril(const Tensor & self, int64_t diagonal) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::tril(_r_self, diagonal); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
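// Possible generator quirk: tril_indices/triu_indices take no Tensor
// arguments, yet the generated code passes `row` (an int64_t) to
// XlaTensorDevice. Unless XlaTensorDevice has an integral overload, the
// device was presumably meant to be derived from `options` instead.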
static Tensor xla_tril_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options) { | |
auto&& __result = at::tril_indices(row, col, offset, options); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(row)); | |
} | |
static Tensor xla_triu_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options) { | |
auto&& __result = at::triu_indices(row, col, offset, options); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(row)); | |
} | |
static Tensor xla_trace(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::trace(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
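// _out wrappers: the destination `result` is aliased mutably and filled by
// the ATen op, then returned as-is; no device inference is needed because
// the caller already supplied an XLA tensor to write into.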
static Tensor & xla_ne_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::ne_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_ne(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::ne(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_ne_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ne_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_ne_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ne(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_eq_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::eq_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_eq(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::eq(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_eq_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::eq_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_eq_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::eq(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_ge_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::ge_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_ge(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::ge(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_ge_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ge_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_ge_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::ge(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_le_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::le_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_le(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::le(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_le_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::le_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_le_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::le(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_gt_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::gt_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_gt(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::gt(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_gt_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::gt_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_gt_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::gt(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_lt_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::lt_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_lt(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::lt(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_lt_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::lt_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_lt_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::lt(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_take_out(Tensor & result, const Tensor & self, const Tensor & index) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::take_out(_w_result, _r_self, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_take(const Tensor & self, const Tensor & index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::take(_r_self, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_index_select_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::index_select_out(_w_result, _r_self, dim, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_index_select(const Tensor & self, int64_t dim, const Tensor & index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::index_select(_r_self, dim, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_masked_select_out(Tensor & result, const Tensor & self, const Tensor & mask) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mask = mask.alias().ToTensor(); | |
auto&& __result = at::masked_select_out(_w_result, _r_self, _r_mask); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_masked_select(const Tensor & self, const Tensor & mask) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_mask = mask.alias().ToTensor(); | |
auto&& __result = at::masked_select(_r_self, _r_mask); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_nonzero_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::nonzero_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_nonzero(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::nonzero(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_gather_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::gather_out(_w_result, _r_self, dim, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_gather(const Tensor & self, int64_t dim, const Tensor & index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_index = index.alias().ToTensor(); | |
auto&& __result = at::gather(_r_self, dim, _r_index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_addcmul_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcmul_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcmul(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_addcdiv_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcdiv_out(_w_result, _r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_tensor1 = tensor1.alias().ToTensor(); | |
auto _r_tensor2 = tensor2.alias().ToTensor(); | |
auto&& __result = at::addcdiv(_r_self, _r_tensor1, _r_tensor2, value); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
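// Linear-algebra section: factorizations and solvers (gels, trtrs, symeig,
// eig, svd, cholesky, qr, geqrf, btrifact, ...). The _out forms return a
// tuple of the caller's own references, while the functional forms unpack
// the ATen tuple with std::get and wrap each element on self's device.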
static std::tuple<Tensor &,Tensor &> xla_gels_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A) { | |
auto _w_X = X.alias().ToMutableTensor(); | |
auto _w_qr = qr.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::gels_out(_w_X, _w_qr, _r_self, _r_A); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(X, qr); | |
} | |
static std::tuple<Tensor,Tensor> xla_gels(const Tensor & self, const Tensor & A) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::gels(_r_self, _r_A); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_trtrs_out(Tensor & X, Tensor & M, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { | |
auto _w_X = X.alias().ToMutableTensor(); | |
auto _w_M = M.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::trtrs_out(_w_X, _w_M, _r_self, _r_A, upper, transpose, unitriangular); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(X, M); | |
} | |
static std::tuple<Tensor,Tensor> xla_trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::trtrs(_r_self, _r_A, upper, transpose, unitriangular); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_symeig_out(Tensor & e, Tensor & V, const Tensor & self, bool eigenvectors, bool upper) { | |
auto _w_e = e.alias().ToMutableTensor(); | |
auto _w_V = V.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::symeig_out(_w_e, _w_V, _r_self, eigenvectors, upper); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(e, V); | |
} | |
static std::tuple<Tensor,Tensor> xla_symeig(const Tensor & self, bool eigenvectors, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::symeig(_r_self, eigenvectors, upper); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors) { | |
auto _w_e = e.alias().ToMutableTensor(); | |
auto _w_v = v.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::eig_out(_w_e, _w_v, _r_self, eigenvectors); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(e, v); | |
} | |
static std::tuple<Tensor,Tensor> xla_eig(const Tensor & self, bool eigenvectors) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::eig(_r_self, eigenvectors); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_svd_out(Tensor & U, Tensor & S, Tensor & V, const Tensor & self, bool some, bool compute_uv) { | |
auto _w_U = U.alias().ToMutableTensor(); | |
auto _w_S = S.alias().ToMutableTensor(); | |
auto _w_V = V.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::svd_out(_w_U, _w_S, _w_V, _r_self, some, compute_uv); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(U, S, V); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_svd(const Tensor & self, bool some, bool compute_uv) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::svd(_r_self, some, compute_uv); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self)));
} | |
static Tensor & xla_cholesky_out(Tensor & result, const Tensor & self, bool upper) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::cholesky_out(_w_result, _r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_cholesky(const Tensor & self, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::cholesky(_r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__cholesky_helper(const Tensor & self, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::_cholesky_helper(_r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_cholesky_solve_out(Tensor & result, const Tensor & self, const Tensor & input2, bool upper) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::cholesky_solve_out(_w_result, _r_self, _r_input2, upper); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_cholesky_solve(const Tensor & self, const Tensor & input2, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::cholesky_solve(_r_self, _r_input2, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla__cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_A = A.alias().ToTensor(); | |
auto&& __result = at::_cholesky_solve_helper(_r_self, _r_A, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_potri_out(Tensor & result, const Tensor & self, bool upper) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::potri_out(_w_result, _r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_potri(const Tensor & self, bool upper) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::potri(_r_self, upper); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_pstrf_out(Tensor & u, Tensor & piv, const Tensor & self, bool upper, Scalar tol) { | |
auto _w_u = u.alias().ToMutableTensor(); | |
auto _w_piv = piv.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pstrf_out(_w_u, _w_piv, _r_self, upper, tol); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(u, piv); | |
} | |
static std::tuple<Tensor,Tensor> xla_pstrf(const Tensor & self, bool upper, Scalar tol) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::pstrf(_r_self, upper, tol); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_qr_out(Tensor & Q, Tensor & R, const Tensor & self) { | |
auto _w_Q = Q.alias().ToMutableTensor(); | |
auto _w_R = R.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::qr_out(_w_Q, _w_R, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(Q, R); | |
} | |
static std::tuple<Tensor,Tensor> xla_qr(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::qr(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static std::tuple<Tensor &,Tensor &> xla_geqrf_out(Tensor & result0, Tensor & result1, const Tensor & self) { | |
auto _w_result0 = result0.alias().ToMutableTensor(); | |
auto _w_result1 = result1.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::geqrf_out(_w_result0, _w_result1, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(result0, result1); | |
} | |
static std::tuple<Tensor,Tensor> xla_geqrf(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::geqrf(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
} | |
static Tensor & xla_orgqr_out(Tensor & result, const Tensor & self, const Tensor & input2) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::orgqr_out(_w_result, _r_self, _r_input2); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_orgqr(const Tensor & self, const Tensor & input2) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto&& __result = at::orgqr(_r_self, _r_input2); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_ormqr_out(Tensor & result, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto _r_input3 = input3.alias().ToTensor(); | |
auto&& __result = at::ormqr_out(_w_result, _r_self, _r_input2, _r_input3, left, transpose); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_input2 = input2.alias().ToTensor(); | |
auto _r_input3 = input3.alias().ToTensor(); | |
auto&& __result = at::ormqr(_r_self, _r_input2, _r_input3, left, transpose); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_btrifact_out(Tensor & A_LU, Tensor & pivots, const Tensor & self, bool pivot) { | |
auto _w_A_LU = A_LU.alias().ToMutableTensor(); | |
auto _w_pivots = pivots.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::btrifact_out(_w_A_LU, _w_pivots, _r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(A_LU, pivots); | |
} | |
static std::tuple<Tensor,Tensor> xla_btrifact(const Tensor & self, bool pivot) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::btrifact(_r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_btrifact_with_info_out(Tensor & A_LU, Tensor & pivots, Tensor & info, const Tensor & self, bool pivot) { | |
auto _w_A_LU = A_LU.alias().ToMutableTensor(); | |
auto _w_pivots = pivots.alias().ToMutableTensor(); | |
auto _w_info = info.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::btrifact_with_info_out(_w_A_LU, _w_pivots, _w_info, _r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(A_LU, pivots, info); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_btrifact_with_info(const Tensor & self, bool pivot) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::btrifact_with_info(_r_self, pivot); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_btrisolve_out(Tensor & result, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_LU_data = LU_data.alias().ToTensor(); | |
auto _r_LU_pivots = LU_pivots.alias().ToTensor(); | |
auto&& __result = at::btrisolve_out(_w_result, _r_self, _r_LU_data, _r_LU_pivots); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_LU_data = LU_data.alias().ToTensor(); | |
auto _r_LU_pivots = LU_pivots.alias().ToTensor(); | |
auto&& __result = at::btrisolve(_r_self, _r_LU_data, _r_LU_pivots); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_multinomial_out(Tensor & result, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::multinomial_out(_w_result, _r_self, num_samples, replacement, generator); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::multinomial(_r_self, num_samples, replacement, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
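// Sampling ops such as multinomial forward the host-side Generator* to the | |
// at:: kernel unchanged, so randomness presumably comes from the reference | |
// implementation's RNG rather than from an XLA-side generator. | |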
static Tensor & xla_lgamma_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::lgamma_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_lgamma(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::lgamma(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_digamma_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::digamma_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_digamma(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::digamma(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_polygamma_out(Tensor & result, int64_t n, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::polygamma_out(_w_result, n, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_polygamma(int64_t n, const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::polygamma(n, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_erfinv_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::erfinv_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_erfinv(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::erfinv(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_frac_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::frac_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_frac(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::frac(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_dist(const Tensor & self, const Tensor & other, Scalar p) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::dist(_r_self, _r_other, p); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_reciprocal_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::reciprocal_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_reciprocal(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::reciprocal(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_neg_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::neg_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_neg(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::neg(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_atan2_out(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::atan2_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_atan2(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::atan2(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_lerp_out(Tensor & result, const Tensor & self, const Tensor & end, Scalar weight) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_end = end.alias().ToTensor(); | |
auto&& __result = at::lerp_out(_w_result, _r_self, _r_end, weight); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_lerp(const Tensor & self, const Tensor & end, Scalar weight) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_end = end.alias().ToTensor(); | |
auto&& __result = at::lerp(_r_self, _r_end, weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_histc_out(Tensor & result, const Tensor & self, int64_t bins, Scalar min, Scalar max) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::histc_out(_w_result, _r_self, bins, min, max); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::histc(_r_self, bins, min, max); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_sign_out(Tensor & result, const Tensor & self) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sign_out(_w_result, _r_self); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_sign(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sign(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_fmod_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::fmod_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_fmod(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::fmod(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_fmod_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::fmod_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_fmod_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::fmod(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_remainder_out(Tensor & result, const Tensor & self, Scalar other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::remainder_out(_w_result, _r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_remainder(const Tensor & self, Scalar other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::remainder(_r_self, other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_remainder_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::remainder_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_remainder_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::remainder(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_min_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::min_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_min_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::min(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_min_2(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::min(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_max_out_1(Tensor & result, const Tensor & self, const Tensor & other) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::max_out(_w_result, _r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_max_1(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::max(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_max_2(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::max(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_median_1(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::median(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
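// For reference, a value-returning unary wrapper has exactly the shape of | |
// xla_median_1 above; a generic sketch (names hypothetical, shown only as a | |
// comment): | |
// | |
//   static Tensor xla_example(const Tensor & self) { | |
//     auto _r_self = self.alias().ToTensor();      // unwrap XLA -> ATen | |
//     auto&& __result = at::example(_r_self);      // reference kernel | |
//     (void) __result;                             // silence unused warning | |
//     return CreateXlaTensor(__result, XlaTensorDevice(self));  // rewrap | |
//   } | |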
static std::tuple<Tensor &,Tensor &> xla_sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) { | |
auto _w_values = values.alias().ToMutableTensor(); | |
auto _w_indices = indices.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sort_out(_w_values, _w_indices, _r_self, dim, descending); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(values, indices); | |
} | |
static std::tuple<Tensor,Tensor> xla_sort(const Tensor & self, int64_t dim, bool descending) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::sort(_r_self, dim, descending); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { | |
auto _w_values = values.alias().ToMutableTensor(); | |
auto _w_indices = indices.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::topk_out(_w_values, _w_indices, _r_self, k, dim, largest, sorted); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(values, indices); | |
} | |
static std::tuple<Tensor,Tensor> xla_topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::topk(_r_self, k, dim, largest, sorted); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor xla_all_1(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::all(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_any_1(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::any(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::renorm_out(_w_result, _r_self, p, dim, maxnorm); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::renorm(_r_self, p, dim, maxnorm); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::unfold(_r_self, dimension, size, step); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static bool xla_equal(const Tensor & self, const Tensor & other) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_other = other.alias().ToTensor(); | |
auto&& __result = at::equal(_r_self, _r_other); | |
(void) __result; // Avoid warnings in case not used | |
return __result; | |
} | |
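// Predicates that return a plain bool, like xla_equal above, pass the at:: | |
// result straight through; only Tensor results need re-wrapping as XLA | |
// tensors. | |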
static Tensor & xla_pow_out_1(Tensor & result, const Tensor & self, const Tensor & exponent) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_exponent = exponent.alias().ToTensor(); | |
auto&& __result = at::pow_out(_w_result, _r_self, _r_exponent); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_pow_1(const Tensor & self, const Tensor & exponent) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_exponent = exponent.alias().ToTensor(); | |
auto&& __result = at::pow(_r_self, _r_exponent); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_pow_out_2(Tensor & result, Scalar self, const Tensor & exponent) { | |
auto _w_result = result.alias().ToMutableTensor(); | |
auto _r_exponent = exponent.alias().ToTensor(); | |
auto&& __result = at::pow_out(_w_result, self, _r_exponent); | |
(void) __result; // Avoid warnings in case not used | |
return result; | |
} | |
static Tensor xla_pow_2(Scalar self, const Tensor & exponent) { | |
auto _r_exponent = exponent.alias().ToTensor(); | |
auto&& __result = at::pow(self, _r_exponent); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(exponent)); | |
} | |
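// When `self` is a Scalar rather than a Tensor (the pow overload above), the | |
// output device is inferred from the remaining tensor operand -- here | |
// `exponent` -- since a Scalar carries no device of its own. | |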
static Tensor & xla_normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_mean = mean.alias().ToTensor(); | |
auto&& __result = at::normal_out(_w_output, _r_mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_normal(const Tensor & mean, double std, Generator * generator) { | |
auto _r_mean = mean.alias().ToTensor(); | |
auto&& __result = at::normal(_r_mean, std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(mean)); | |
} | |
static Tensor & xla_normal_out_1(Tensor & output, double mean, const Tensor & std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::normal_out(_w_output, mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_normal_1(double mean, const Tensor & std, Generator * generator) { | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::normal(mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(std)); | |
} | |
static Tensor & xla_normal_out_2(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_mean = mean.alias().ToTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::normal_out(_w_output, _r_mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_normal_2(const Tensor & mean, const Tensor & std, Generator * generator) { | |
auto _r_mean = mean.alias().ToTensor(); | |
auto _r_std = std.alias().ToTensor(); | |
auto&& __result = at::normal(_r_mean, _r_std, generator); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(std)); | |
} | |
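// The normal() family picks its device donor from the tensor arguments: | |
// xla_normal uses `mean`, while xla_normal_1 and xla_normal_2 use `std`. | |
// Which operand is chosen when several are available appears to be a | |
// generator convention rather than a semantic requirement. | |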
static Tensor xla_alias(const Tensor & self) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::alias(_r_self); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla__dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_x = x.alias().ToTensor(); | |
auto _r_alpha = alpha.alias().ToTensor(); | |
auto _r_total = total.alias().ToTensor(); | |
auto&& __result = at::_dirichlet_grad_out(_w_output, _r_x, _r_alpha, _r_total); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla__dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
auto _r_x = x.alias().ToTensor(); | |
auto _r_alpha = alpha.alias().ToTensor(); | |
auto _r_total = total.alias().ToTensor(); | |
auto&& __result = at::_dirichlet_grad(_r_x, _r_alpha, _r_total); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(total)); | |
} | |
static Tensor & xla_binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::binary_cross_entropy_out(_w_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::binary_cross_entropy(_r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::binary_cross_entropy_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::binary_cross_entropy_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::mse_loss_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::mse_loss(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::mse_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::mse_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::l1_loss_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::l1_loss(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::l1_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::l1_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::multi_margin_loss_out(_w_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::multi_margin_loss(_r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::multi_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::multi_margin_loss_backward(_r_grad_output, _r_self, _r_target, p, margin, _r_weight, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_is_target = is_target.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss_forward_out(_w_output, _w_is_target, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, is_target); | |
} | |
static std::tuple<Tensor,Tensor> xla_multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss_forward(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
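// The _forward variants return an auxiliary buffer alongside the loss | |
// (is_target here, total_weight for the nll_loss wrappers below) which the | |
// matching _backward wrapper takes back as a read-only input. | |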
static Tensor & xla_multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_is_target = is_target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction, _r_is_target); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_is_target = is_target.alias().ToTensor(); | |
auto&& __result = at::multilabel_margin_loss_backward(_r_grad_output, _r_self, _r_target, reduction, _r_is_target); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss_out(_w_output, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_total_weight = total_weight.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss_forward_out(_w_output, _w_total_weight, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, total_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla_nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss_forward(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d_out(_w_output, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_total_weight = total_weight.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d_forward_out(_w_output, _w_total_weight, _r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(output, total_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla_nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d_forward(_r_self, _r_target, _r_weight, reduction, ignore_index); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_total_weight = total_weight.alias().ToTensor(); | |
auto&& __result = at::nll_loss2d_backward(_r_grad_output, _r_self, _r_target, _r_weight, reduction, ignore_index, _r_total_weight); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::smooth_l1_loss_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::smooth_l1_loss(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::smooth_l1_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::smooth_l1_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::soft_margin_loss_out(_w_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::soft_margin_loss(_r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::soft_margin_loss_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_target = target.alias().ToTensor(); | |
auto&& __result = at::soft_margin_loss_backward(_r_grad_output, _r_self, _r_target, reduction); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::elu_out(_w_output, _r_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::elu(_r_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto&& __result = at::elu_backward_out(_w_grad_input, _r_grad_output, alpha, scale, input_scale, _r_output); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_output = output.alias().ToTensor(); | |
auto&& __result = at::elu_backward(_r_grad_output, alpha, scale, input_scale, _r_output); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(output)); | |
} | |
static Tensor & xla_elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::elu_(_w_self, alpha, scale, input_scale); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
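// In-place variants (trailing-underscore ops such as elu_ above) mutate | |
// `self` through its ToMutableTensor() view and return the original XLA | |
// reference to the caller; no new XLA tensor is created. | |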
static Tensor & xla_glu_out(Tensor & output, const Tensor & self, int64_t dim) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::glu_out(_w_output, _r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_glu(const Tensor & self, int64_t dim) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::glu(_r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::glu_backward_out(_w_grad_input, _r_grad_output, _r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::glu_backward(_r_grad_output, _r_self, dim); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardtanh_out(_w_output, _r_self, min_val, max_val); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardtanh(_r_self, min_val, max_val); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardtanh_backward_out(_w_grad_input, _r_grad_output, _r_self, min_val, max_val); | |
(void) __result; // Avoid warnings in case not used | |
return grad_input; | |
} | |
static Tensor xla_hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::hardtanh_backward(_r_grad_output, _r_self, min_val, max_val); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) { | |
auto _w_self = self.alias().ToMutableTensor(); | |
auto&& __result = at::hardtanh_(_w_self, min_val, max_val); | |
(void) __result; // Avoid warnings in case not used | |
return self; | |
} | |
static Tensor & xla_leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::leaky_relu_out(_w_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_leaky_relu(const Tensor & self, Scalar negative_slope) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::leaky_relu(_r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::leaky_relu_backward_out(_w_grad_input, _r_grad_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::leaky_relu_backward(_r_grad_output, _r_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_leaky_relu_(Tensor & self, Scalar negative_slope) {
  auto _w_self = self.alias().ToMutableTensor();
  auto&& __result = at::leaky_relu_(_w_self, negative_slope);
  (void) __result; // Avoid warnings in case not used
  return self;
}
static Tensor & xla_log_sigmoid_out(Tensor & output, const Tensor & self) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::log_sigmoid_out(_w_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_log_sigmoid(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::log_sigmoid(_r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_buffer = buffer.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::log_sigmoid_forward_out(_w_output, _w_buffer, _r_self);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, buffer);
}
static std::tuple<Tensor,Tensor> xla_log_sigmoid_forward(const Tensor & self) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::log_sigmoid_forward(_r_self);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
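// Wrappers returning multiple tensors unpack the fallback's std::tuple with
// std::get<I>() and rewrap each element on the same XLA device; the _out
// variants instead return a tuple of the caller-provided references
// unchanged.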
static Tensor & xla_log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_buffer = buffer.alias().ToTensor();
  auto&& __result = at::log_sigmoid_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_buffer);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_buffer = buffer.alias().ToTensor();
  auto&& __result = at::log_sigmoid_backward(_r_grad_output, _r_self, _r_buffer);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::rrelu_with_noise_out(_w_output, _r_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::rrelu_with_noise(_r_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::rrelu_with_noise_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_noise, lower, upper, training);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::rrelu_with_noise_backward(_r_grad_output, _r_self, _r_noise, lower, upper, training);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
  auto _w_self = self.alias().ToMutableTensor();
  auto _r_noise = noise.alias().ToTensor();
  auto&& __result = at::rrelu_with_noise_(_w_self, _r_noise, lower, upper, training, generator);
  (void) __result; // Avoid warnings in case not used
  return self;
}
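// Note: the Generator* argument is forwarded verbatim to the at:: fallback,
// so any random number generation for rrelu_with_noise happens in the
// fallback implementation rather than in XLA-lowered code.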
static Tensor & xla_softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softplus_out(_w_output, _r_self, beta, threshold);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_softplus(const Tensor & self, Scalar beta, Scalar threshold) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softplus(_r_self, beta, threshold);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::softplus_backward_out(_w_grad_input, _r_grad_output, _r_self, beta, threshold, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::softplus_backward(_r_grad_output, _r_self, beta, threshold, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softshrink_out(_w_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_softshrink(const Tensor & self, Scalar lambd) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softshrink(_r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softshrink_backward_out(_w_grad_input, _r_grad_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::softshrink_backward(_r_grad_output, _r_self, lambd);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool2d_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_adaptive_avg_pool2d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool2d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_adaptive_avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool2d_backward(_r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool3d_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_adaptive_avg_pool3d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool3d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_avg_pool3d_backward(_r_grad_output, _r_self);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_adaptive_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool2d_out(_w_output, _w_indices, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_adaptive_max_pool2d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool2d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool2d_backward(_r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_adaptive_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool3d_out(_w_output, _w_indices, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_adaptive_max_pool3d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool3d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::adaptive_max_pool3d_backward(_r_grad_output, _r_self, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
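// The pooling wrappers come in matched forward/backward pairs: the `indices`
// tensor produced by a forward pass is unwrapped again and forwarded verbatim
// to the corresponding _backward fallback, and the backward result is
// rewrapped on XlaTensorDevice(self), i.e. the device of the forward input.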
static Tensor & xla_avg_pool2d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool2d_out(_w_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool2d(_r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool2d_backward(_r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_avg_pool3d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool3d_out(_w_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool3d(_r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::avg_pool3d_backward(_r_grad_output, _r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_random_samples = random_samples.alias().ToTensor();
  auto&& __result = at::fractional_max_pool2d_out(_w_output, _w_indices, _r_self, kernel_size, output_size, _r_random_samples);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
  auto _r_self = self.alias().ToTensor();
  auto _r_random_samples = random_samples.alias().ToTensor();
  auto&& __result = at::fractional_max_pool2d(_r_self, kernel_size, output_size, _r_random_samples);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::fractional_max_pool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, output_size, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::fractional_max_pool2d_backward(_r_grad_output, _r_self, kernel_size, output_size, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_fractional_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_random_samples = random_samples.alias().ToTensor();
  auto&& __result = at::fractional_max_pool3d_out(_w_output, _w_indices, _r_self, kernel_size, output_size, _r_random_samples);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_fractional_max_pool3d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
  auto _r_self = self.alias().ToTensor();
  auto _r_random_samples = random_samples.alias().ToTensor();
  auto&& __result = at::fractional_max_pool3d(_r_self, kernel_size, output_size, _r_random_samples);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::fractional_max_pool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, output_size, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::fractional_max_pool3d_backward(_r_grad_output, _r_self, kernel_size, output_size, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_max_pool2d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::max_pool2d_with_indices_out(_w_output, _w_indices, _r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_max_pool2d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::max_pool2d_with_indices(_r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_pool2d_with_indices_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_pool2d_with_indices_backward(_r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static std::tuple<Tensor &,Tensor &> xla_max_pool3d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _w_indices = indices.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::max_pool3d_with_indices_out(_w_output, _w_indices, _r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor &,Tensor &>(output, indices);
}
static std::tuple<Tensor,Tensor> xla_max_pool3d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::max_pool3d_with_indices(_r_self, kernel_size, stride, padding, dilation, ceil_mode);
  (void) __result; // Avoid warnings in case not used
  return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)));
}
static Tensor & xla_max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_pool3d_with_indices_backward_out(_w_grad_input, _r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_pool3d_with_indices_backward(_r_grad_output, _r_self, kernel_size, stride, padding, dilation, ceil_mode, _r_indices);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
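// A minimal usage sketch (hypothetical, not part of the generated file):
// assuming these statics are collected into a dispatch table elsewhere in
// the build, a call such as
//
//   at::Tensor out, idx;
//   std::tie(out, idx) = xla_max_pool2d_with_indices(
//       xla_input, /*kernel_size=*/{2, 2}, /*stride=*/{2, 2},
//       /*padding=*/{0, 0}, /*dilation=*/{1, 1}, /*ceil_mode=*/false);
//
// would unwrap xla_input, run the at:: fallback, and hand back both results
// rewrapped on xla_input's device. The registration mechanism itself is not
// shown in this file.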
static Tensor & xla_max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool2d_out(_w_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool2d(_r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool2d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool2d_backward(_r_grad_output, _r_self, _r_indices, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool3d_out(_w_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool3d(_r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool3d_backward_out(_w_grad_input, _r_grad_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto _r_indices = indices.alias().ToTensor();
  auto&& __result = at::max_unpool3d_backward(_r_grad_output, _r_self, _r_indices, output_size, stride, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_reflection_pad1d_out(Tensor & output, const Tensor & self, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad1d_out(_w_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_reflection_pad1d(const Tensor & self, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad1d(_r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad1d_backward_out(_w_grad_input, _r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad1d_backward(_r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_reflection_pad2d_out(Tensor & output, const Tensor & self, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad2d_out(_w_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_reflection_pad2d(const Tensor & self, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad2d(_r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad2d_backward_out(_w_grad_input, _r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::reflection_pad2d_backward(_r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad1d_out(Tensor & output, const Tensor & self, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad1d_out(_w_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_replication_pad1d(const Tensor & self, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad1d(_r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad1d_backward_out(_w_grad_input, _r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad1d_backward(_r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad2d_out(Tensor & output, const Tensor & self, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad2d_out(_w_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_replication_pad2d(const Tensor & self, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad2d(_r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad2d_backward_out(_w_grad_input, _r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad2d_backward(_r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad3d_out(Tensor & output, const Tensor & self, IntList padding) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad3d_out(_w_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_replication_pad3d(const Tensor & self, IntList padding) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad3d(_r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad3d_backward_out(_w_grad_input, _r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::replication_pad3d_backward(_r_grad_output, _r_self, padding);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_linear1d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_linear1d_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_linear1d(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_linear1d(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_linear1d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_linear1d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
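// The upsample backward wrappers take no `self` argument, so the result
// device is derived from grad_output (XlaTensorDevice(grad_output)) instead
// of from the forward input.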
static Tensor & xla_upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_bilinear2d_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_bilinear2d(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_bilinear2d(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_bilinear2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_bilinear2d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_bicubic2d_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_bicubic2d(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_bicubic2d(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_bicubic2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_bicubic2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_bicubic2d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_trilinear3d_out(_w_output, _r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_trilinear3d(const Tensor & self, IntList output_size, bool align_corners) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_trilinear3d(_r_self, output_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_trilinear3d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_trilinear3d_backward(_r_grad_output, output_size, input_size, align_corners);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_upsample_nearest1d_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest1d_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_nearest1d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest1d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest1d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_nearest1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest1d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_upsample_nearest2d_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest2d_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_nearest2d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest2d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest2d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_nearest2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest2d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_upsample_nearest3d_out(Tensor & output, const Tensor & self, IntList output_size) {
  auto _w_output = output.alias().ToMutableTensor();
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest3d_out(_w_output, _r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return output;
}
static Tensor xla_upsample_nearest3d(const Tensor & self, IntList output_size) {
  auto _r_self = self.alias().ToTensor();
  auto&& __result = at::upsample_nearest3d(_r_self, output_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(self));
}
static Tensor & xla_upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest3d_backward_out(_w_grad_input, _r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_upsample_nearest3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto&& __result = at::upsample_nearest3d_backward(_r_grad_output, output_size, input_size);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(grad_output));
}
static Tensor & xla_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::sigmoid_backward_out(_w_grad_input, _r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_sigmoid_backward(const Tensor & grad_output, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::sigmoid_backward(_r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(output));
}
static Tensor & xla_tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
  auto _w_grad_input = grad_input.alias().ToMutableTensor();
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::tanh_backward_out(_w_grad_input, _r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return grad_input;
}
static Tensor xla_tanh_backward(const Tensor & grad_output, const Tensor & output) {
  auto _r_grad_output = grad_output.alias().ToTensor();
  auto _r_output = output.alias().ToTensor();
  auto&& __result = at::tanh_backward(_r_grad_output, _r_output);
  (void) __result; // Avoid warnings in case not used
  return CreateXlaTensor(__result, XlaTensorDevice(output));
}
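// sigmoid_backward and tanh_backward are expressed in terms of the forward
// `output` rather than the input, so their results are rewrapped on
// XlaTensorDevice(output).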
static Tensor & xla_thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_columns = columns.alias().ToMutableTensor(); | |
auto _w_ones = ones.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_columns, _r_ones); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_columns, _r_ones, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_finput = finput.alias().ToMutableTensor(); | |
auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, output_padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_finput, _r_fgrad_input); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_transpose3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, output_padding, dilation, _r_finput, _r_fgrad_input, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_finput = finput.alias().ToMutableTensor(); | |
auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor & xla_thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d_forward_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &> xla_thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d_backward_out(_w_grad_input, _w_grad_weight, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &>(grad_input, grad_weight); | |
} | |
static std::tuple<Tensor,Tensor> xla_thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_depthwise2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self))); | |
} | |
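// Note: depthwise2d backward produces only (grad_input, grad_weight) and | |
// takes a std::array<bool,2> output mask; the other conv backwards also | |
// return a grad_bias and take a std::array<bool,3> mask. | |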
static Tensor & xla_thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_finput = finput.alias().ToMutableTensor(); | |
auto _w_fgrad_input = fgrad_input.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d_forward_out(_w_output, _w_finput, _w_fgrad_input, _r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, finput, fgrad_input); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_finput = finput.alias().ToTensor(); | |
auto _r_fgrad_input = fgrad_input.alias().ToTensor(); | |
auto&& __result = at::thnn_conv3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, _r_finput, _r_fgrad_input, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_columns = columns.alias().ToMutableTensor(); | |
auto _w_ones = ones.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated2d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static Tensor & xla_thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d_out(_w_output, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return output; | |
} | |
static Tensor xla_thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _w_output = output.alias().ToMutableTensor(); | |
auto _w_columns = columns.alias().ToMutableTensor(); | |
auto _w_ones = ones.alias().ToMutableTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d_forward_out(_w_output, _w_columns, _w_ones, _r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(output, columns, ones); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) { | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_bias = bias.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d_forward(_r_self, _r_weight, kernel_size, _r_bias, stride, padding, dilation); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
static std::tuple<Tensor &,Tensor &,Tensor &> xla_thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) { | |
auto _w_grad_input = grad_input.alias().ToMutableTensor(); | |
auto _w_grad_weight = grad_weight.alias().ToMutableTensor(); | |
auto _w_grad_bias = grad_bias.alias().ToMutableTensor(); | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d_backward_out(_w_grad_input, _w_grad_weight, _w_grad_bias, _r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor &,Tensor &,Tensor &>(grad_input, grad_weight, grad_bias); | |
} | |
static std::tuple<Tensor,Tensor,Tensor> xla_thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto _r_self = self.alias().ToTensor(); | |
auto _r_weight = weight.alias().ToTensor(); | |
auto _r_columns = columns.alias().ToTensor(); | |
auto _r_ones = ones.alias().ToTensor(); | |
auto&& __result = at::thnn_conv_dilated3d_backward(_r_grad_output, _r_self, _r_weight, kernel_size, stride, padding, dilation, _r_columns, _r_ones, output_mask); | |
(void) __result; // Avoid warnings in case not used | |
return std::tuple<Tensor,Tensor,Tensor>(CreateXlaTensor(std::get<0>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<1>(__result), XlaTensorDevice(self)), CreateXlaTensor(std::get<2>(__result), XlaTensorDevice(self))); | |
} | |
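// col2im/im2col wrappers: these take only geometry arguments (output/input | |
// size, kernel_size, dilation, padding, stride) plus a single tensor, so | |
// the backward variants rewrap their result on grad_output's device. | |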
static Tensor xla_thnn_col2im(const Tensor & self, IntList output_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::thnn_col2im(_r_self, output_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_thnn_col2im_backward(const Tensor & grad_output, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::thnn_col2im_backward(_r_grad_output, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(grad_output)); | |
} | |
static Tensor xla_thnn_im2col(const Tensor & self, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_self = self.alias().ToTensor(); | |
auto&& __result = at::thnn_im2col(_r_self, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(self)); | |
} | |
static Tensor xla_thnn_im2col_backward(const Tensor & grad_output, IntList input_size, IntList kernel_size, IntList dilation, IntList padding, IntList stride) { | |
auto _r_grad_output = grad_output.alias().ToTensor(); | |
auto&& __result = at::thnn_im2col_backward(_r_grad_output, input_size, kernel_size, dilation, padding, stride); | |
(void) __result; // Avoid warnings in case not used | |
return CreateXlaTensor(__result, XlaTensorDevice(grad_output)); | |
} | |
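// RegisterFunctions() binds every wrapper above to the TPU extension | |
// backend. Each registration pairs a schema string with the wrapper's | |
// function pointer; the schema must match the signature dispatched by | |
// ATen, and overloads of the same schema name are disambiguated by the | |
// generated _1/_2/... suffixes on the C++ symbols. | |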
static void RegisterFunctions() { | |
register_extension_backend_op( | |
Backend::TPU, | |
"set_data(Tensor, Tensor) -> void", | |
&xla_set_data); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_set_(Tensor, Storage) -> Tensor", | |
&xla__th_set_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_set_(Tensor, Storage, int64_t, IntList, IntList) -> Tensor", | |
&xla__th_set__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_set_(Tensor, Tensor) -> Tensor", | |
&xla__th_set__2); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_set_(Tensor) -> Tensor", | |
&xla__th_set__3); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_fill_(Tensor, Scalar) -> Tensor", | |
&xla__th_fill_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_fill_(Tensor, Tensor) -> Tensor", | |
&xla__th_fill__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_is_set_to(Tensor, Tensor) -> bool", | |
&xla__th_is_set_to); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_masked_fill_(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_masked_fill_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_masked_fill_(Tensor, Tensor, Scalar) -> Tensor", | |
&xla_s__th_masked_fill_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_masked_fill_(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_masked_fill__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_masked_fill_(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_masked_fill__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_masked_scatter_(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_masked_scatter_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_masked_scatter_(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_masked_scatter_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_masked_select_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_masked_select_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_masked_select_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_masked_select_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_masked_select(Tensor, Tensor) -> Tensor", | |
&xla__th_masked_select); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_masked_select(Tensor, Tensor) -> Tensor", | |
&xla_s__th_masked_select); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_nonzero_out(Tensor, Tensor) -> Tensor", | |
&xla__th_nonzero_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_nonzero(Tensor) -> Tensor", | |
&xla__th_nonzero); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_clone(Tensor) -> Tensor", | |
&xla__th_clone); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_view(Tensor, IntList) -> Tensor", | |
&xla__th_view); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_resize_as_(Tensor, Tensor) -> Tensor", | |
&xla__th_resize_as_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_select_out(Tensor, Tensor, int64_t, Tensor) -> Tensor", | |
&xla__th_index_select_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_select(Tensor, int64_t, Tensor) -> Tensor", | |
&xla__th_index_select); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_copy_(Tensor, int64_t, Tensor, Tensor) -> Tensor", | |
&xla__th_index_copy_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_take_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_take_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_take(Tensor, Tensor) -> Tensor", | |
&xla__th_take); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_put_(Tensor, Tensor, Tensor, bool) -> Tensor", | |
&xla__th_put_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_add_(Tensor, int64_t, Tensor, Tensor) -> Tensor", | |
&xla__th_index_add_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_fill_(Tensor, int64_t, Tensor, Scalar) -> Tensor", | |
&xla__th_index_fill_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_index_fill_(Tensor, int64_t, Tensor, Tensor) -> Tensor", | |
&xla__th_index_fill__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_unfold_out(Tensor, Tensor, int64_t, int64_t, int64_t) -> Tensor", | |
&xla__th_unfold_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_unfold(Tensor, int64_t, int64_t, int64_t) -> Tensor", | |
&xla__th_unfold); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_scatter_(Tensor, int64_t, Tensor, Tensor) -> Tensor", | |
&xla__th_scatter_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_scatter_(Tensor, int64_t, Tensor, Scalar) -> Tensor", | |
&xla__th_scatter__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_scatter_add_(Tensor, int64_t, Tensor, Tensor) -> Tensor", | |
&xla__th_scatter_add_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gather_out(Tensor, Tensor, int64_t, Tensor) -> Tensor", | |
&xla__th_gather_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gather(Tensor, int64_t, Tensor) -> Tensor", | |
&xla__th_gather); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_equal(Tensor, Tensor) -> bool", | |
&xla__th_equal); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_and_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_and_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_and(Tensor, Scalar) -> Tensor", | |
&xla__th_and); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_and_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_and_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_and_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_and_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_and(Tensor, Tensor) -> Tensor", | |
&xla__th_and_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_and(Tensor, Tensor) -> Tensor", | |
&xla_s__th_and); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_iand_(Tensor, Scalar) -> Tensor", | |
&xla__th_iand_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_iand_(Tensor, Tensor) -> Tensor", | |
&xla__th_iand__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_iand_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_iand_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_or_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_or_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_or(Tensor, Scalar) -> Tensor", | |
&xla__th_or); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_or_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_or_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_or_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_or_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_or(Tensor, Tensor) -> Tensor", | |
&xla__th_or_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_or(Tensor, Tensor) -> Tensor", | |
&xla_s__th_or); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ior_(Tensor, Scalar) -> Tensor", | |
&xla__th_ior_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ior_(Tensor, Tensor) -> Tensor", | |
&xla__th_ior__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ior_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ior_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_xor_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_xor_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_xor(Tensor, Scalar) -> Tensor", | |
&xla__th_xor); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_xor_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_xor_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_xor_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_xor_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_xor(Tensor, Tensor) -> Tensor", | |
&xla__th_xor_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_xor(Tensor, Tensor) -> Tensor", | |
&xla_s__th_xor); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ixor_(Tensor, Scalar) -> Tensor", | |
&xla__th_ixor_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ixor_(Tensor, Tensor) -> Tensor", | |
&xla__th_ixor__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ixor_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ixor_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lshift_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_lshift_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lshift(Tensor, Scalar) -> Tensor", | |
&xla__th_lshift); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lshift_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_lshift_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_lshift_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_lshift_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lshift(Tensor, Tensor) -> Tensor", | |
&xla__th_lshift_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_lshift(Tensor, Tensor) -> Tensor", | |
&xla_s__th_lshift); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ilshift_(Tensor, Scalar) -> Tensor", | |
&xla__th_ilshift_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ilshift_(Tensor, Tensor) -> Tensor", | |
&xla__th_ilshift__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ilshift_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ilshift_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_rshift_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_rshift_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_rshift(Tensor, Scalar) -> Tensor", | |
&xla__th_rshift); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_rshift_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_rshift_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_rshift_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_rshift_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_rshift(Tensor, Tensor) -> Tensor", | |
&xla__th_rshift_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_rshift(Tensor, Tensor) -> Tensor", | |
&xla_s__th_rshift); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_irshift_(Tensor, Scalar) -> Tensor", | |
&xla__th_irshift_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_irshift_(Tensor, Tensor) -> Tensor", | |
&xla__th_irshift__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_irshift_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_irshift_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_lt_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt(Tensor, Scalar) -> Tensor", | |
&xla__th_lt); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_lt_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_lt_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_lt_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt(Tensor, Tensor) -> Tensor", | |
&xla__th_lt_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_lt(Tensor, Tensor) -> Tensor", | |
&xla_s__th_lt); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt_(Tensor, Scalar) -> Tensor", | |
&xla__th_lt_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_lt_(Tensor, Tensor) -> Tensor", | |
&xla__th_lt__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_lt_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_lt_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_gt_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt(Tensor, Scalar) -> Tensor", | |
&xla__th_gt); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_gt_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_gt_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_gt_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt(Tensor, Tensor) -> Tensor", | |
&xla__th_gt_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_gt(Tensor, Tensor) -> Tensor", | |
&xla_s__th_gt); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt_(Tensor, Scalar) -> Tensor", | |
&xla__th_gt_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_gt_(Tensor, Tensor) -> Tensor", | |
&xla__th_gt__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_gt_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_gt_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_le_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le(Tensor, Scalar) -> Tensor", | |
&xla__th_le); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_le_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_le_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_le_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le(Tensor, Tensor) -> Tensor", | |
&xla__th_le_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_le(Tensor, Tensor) -> Tensor", | |
&xla_s__th_le); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le_(Tensor, Scalar) -> Tensor", | |
&xla__th_le_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_le_(Tensor, Tensor) -> Tensor", | |
&xla__th_le__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_le_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_le_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_ge_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge(Tensor, Scalar) -> Tensor", | |
&xla__th_ge); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_ge_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ge_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_ge_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge(Tensor, Tensor) -> Tensor", | |
&xla__th_ge_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ge(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ge); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge_(Tensor, Scalar) -> Tensor", | |
&xla__th_ge_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ge_(Tensor, Tensor) -> Tensor", | |
&xla__th_ge__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ge_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ge_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_eq_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq(Tensor, Scalar) -> Tensor", | |
&xla__th_eq); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_eq_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_eq_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_eq_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq(Tensor, Tensor) -> Tensor", | |
&xla__th_eq_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_eq(Tensor, Tensor) -> Tensor", | |
&xla_s__th_eq); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq_(Tensor, Scalar) -> Tensor", | |
&xla__th_eq_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_eq_(Tensor, Tensor) -> Tensor", | |
&xla__th_eq__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_eq_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_eq_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne_out(Tensor, Tensor, Scalar) -> Tensor", | |
&xla__th_ne_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne(Tensor, Scalar) -> Tensor", | |
&xla__th_ne); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_ne_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ne_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_ne_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne(Tensor, Tensor) -> Tensor", | |
&xla__th_ne_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ne(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ne); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne_(Tensor, Scalar) -> Tensor", | |
&xla__th_ne_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_ne_(Tensor, Tensor) -> Tensor", | |
&xla__th_ne__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_ne_(Tensor, Tensor) -> Tensor", | |
&xla_s__th_ne_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_min_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_min_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_min_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_min_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_min(Tensor, Tensor) -> Tensor", | |
&xla__th_min); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_min(Tensor, Tensor) -> Tensor", | |
&xla_s__th_min); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_min(Tensor) -> Tensor", | |
&xla__th_min_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_min_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_min_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_min(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_min_2); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_max_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla__th_max_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_max_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_s__th_max_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_max(Tensor, Tensor) -> Tensor", | |
&xla__th_max); | |
register_extension_backend_op( | |
Backend::TPU, | |
"s__th_max(Tensor, Tensor) -> Tensor", | |
&xla_s__th_max); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_max(Tensor) -> Tensor", | |
&xla__th_max_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_max_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_max_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_max(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_max_2); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_kthvalue_out(Tensor, Tensor, Tensor, int64_t, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_kthvalue_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_kthvalue(Tensor, int64_t, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_kthvalue); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_mode_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_mode_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_mode(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_mode); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_median(Tensor) -> Tensor", | |
&xla__th_median); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_median_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_median_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_median(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_median_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_sort_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_sort_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_sort(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_sort); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_topk_out(Tensor, Tensor, Tensor, int64_t, int64_t, bool, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_topk_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_topk(Tensor, int64_t, int64_t, bool, bool) -> std::tuple<Tensor,Tensor>", | |
&xla__th_topk); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_any(Tensor) -> Tensor", | |
&xla__th_any); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_any_out(Tensor, Tensor, int64_t, bool) -> Tensor", | |
&xla__th_any_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_th_any(Tensor, int64_t, bool) -> Tensor", | |
&xla__th_any_1); | |
  register_extension_backend_op(
      Backend::TPU,
      "_th_abs_out(Tensor, Tensor) -> Tensor",
      &xla__th_abs_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_abs(Tensor) -> Tensor",
      &xla__th_abs);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sigmoid_out(Tensor, Tensor) -> Tensor",
      &xla__th_sigmoid_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sigmoid(Tensor) -> Tensor",
      &xla__th_sigmoid);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log_out(Tensor, Tensor) -> Tensor",
      &xla__th_log_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log(Tensor) -> Tensor",
      &xla__th_log);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log10_out(Tensor, Tensor) -> Tensor",
      &xla__th_log10_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log10(Tensor) -> Tensor",
      &xla__th_log10);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log1p_out(Tensor, Tensor) -> Tensor",
      &xla__th_log1p_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log1p(Tensor) -> Tensor",
      &xla__th_log1p);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log2_out(Tensor, Tensor) -> Tensor",
      &xla__th_log2_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log2(Tensor) -> Tensor",
      &xla__th_log2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lgamma_out(Tensor, Tensor) -> Tensor",
      &xla__th_lgamma_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lgamma(Tensor) -> Tensor",
      &xla__th_lgamma);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lgamma_(Tensor) -> Tensor",
      &xla__th_lgamma_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_digamma_out(Tensor, Tensor) -> Tensor",
      &xla__th_digamma_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_digamma(Tensor) -> Tensor",
      &xla__th_digamma);
  register_extension_backend_op(
      Backend::TPU,
      "_th_digamma_(Tensor) -> Tensor",
      &xla__th_digamma_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_polygamma_out(Tensor, int64_t, Tensor) -> Tensor",
      &xla__th_polygamma_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_polygamma(int64_t, Tensor) -> Tensor",
      &xla__th_polygamma);
  register_extension_backend_op(
      Backend::TPU,
      "_th_polygamma_(Tensor, int64_t) -> Tensor",
      &xla__th_polygamma_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_exp_out(Tensor, Tensor) -> Tensor",
      &xla__th_exp_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_exp(Tensor) -> Tensor",
      &xla__th_exp);
  register_extension_backend_op(
      Backend::TPU,
      "_th_expm1_out(Tensor, Tensor) -> Tensor",
      &xla__th_expm1_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_expm1(Tensor) -> Tensor",
      &xla__th_expm1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cos_out(Tensor, Tensor) -> Tensor",
      &xla__th_cos_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cos(Tensor) -> Tensor",
      &xla__th_cos);
  register_extension_backend_op(
      Backend::TPU,
      "_th_acos_out(Tensor, Tensor) -> Tensor",
      &xla__th_acos_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_acos(Tensor) -> Tensor",
      &xla__th_acos);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cosh_out(Tensor, Tensor) -> Tensor",
      &xla__th_cosh_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cosh(Tensor) -> Tensor",
      &xla__th_cosh);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sin_out(Tensor, Tensor) -> Tensor",
      &xla__th_sin_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sin(Tensor) -> Tensor",
      &xla__th_sin);
  register_extension_backend_op(
      Backend::TPU,
      "_th_asin_out(Tensor, Tensor) -> Tensor",
      &xla__th_asin_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_asin(Tensor) -> Tensor",
      &xla__th_asin);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sinh_out(Tensor, Tensor) -> Tensor",
      &xla__th_sinh_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sinh(Tensor) -> Tensor",
      &xla__th_sinh);
  register_extension_backend_op(
      Backend::TPU,
      "_th_tan_out(Tensor, Tensor) -> Tensor",
      &xla__th_tan_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_tan(Tensor) -> Tensor",
      &xla__th_tan);
  register_extension_backend_op(
      Backend::TPU,
      "_th_atan_out(Tensor, Tensor) -> Tensor",
      &xla__th_atan_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_atan(Tensor) -> Tensor",
      &xla__th_atan);
  register_extension_backend_op(
      Backend::TPU,
      "_th_tanh_out(Tensor, Tensor) -> Tensor",
      &xla__th_tanh_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_tanh(Tensor) -> Tensor",
      &xla__th_tanh);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erf_out(Tensor, Tensor) -> Tensor",
      &xla__th_erf_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erf(Tensor) -> Tensor",
      &xla__th_erf);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erfc_out(Tensor, Tensor) -> Tensor",
      &xla__th_erfc_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erfc(Tensor) -> Tensor",
      &xla__th_erfc);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erfinv_(Tensor) -> Tensor",
      &xla__th_erfinv_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erfinv_out(Tensor, Tensor) -> Tensor",
      &xla__th_erfinv_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_erfinv(Tensor) -> Tensor",
      &xla__th_erfinv);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sqrt_out(Tensor, Tensor) -> Tensor",
      &xla__th_sqrt_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sqrt(Tensor) -> Tensor",
      &xla__th_sqrt);
  register_extension_backend_op(
      Backend::TPU,
      "_th_rsqrt_out(Tensor, Tensor) -> Tensor",
      &xla__th_rsqrt_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_rsqrt(Tensor) -> Tensor",
      &xla__th_rsqrt);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ceil_out(Tensor, Tensor) -> Tensor",
      &xla__th_ceil_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ceil(Tensor) -> Tensor",
      &xla__th_ceil);
  register_extension_backend_op(
      Backend::TPU,
      "_th_floor_out(Tensor, Tensor) -> Tensor",
      &xla__th_floor_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_floor(Tensor) -> Tensor",
      &xla__th_floor);
  register_extension_backend_op(
      Backend::TPU,
      "_th_round_out(Tensor, Tensor) -> Tensor",
      &xla__th_round_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_round(Tensor) -> Tensor",
      &xla__th_round);
  register_extension_backend_op(
      Backend::TPU,
      "_th_trunc_out(Tensor, Tensor) -> Tensor",
      &xla__th_trunc_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_trunc(Tensor) -> Tensor",
      &xla__th_trunc);
  register_extension_backend_op(
      Backend::TPU,
      "_th_frac_(Tensor) -> Tensor",
      &xla__th_frac_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_frac_out(Tensor, Tensor) -> Tensor",
      &xla__th_frac_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_frac(Tensor) -> Tensor",
      &xla__th_frac);
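  // Statistical reductions and norms: var/std (dim-wise and whole-tensor
  // overloads), renorm, and the p-norm distance _th_dist, followed by
  // reciprocal and neg.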
  register_extension_backend_op(
      Backend::TPU,
      "_th_var_out(Tensor, Tensor, int64_t, bool, bool) -> Tensor",
      &xla__th_var_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_var(Tensor, int64_t, bool, bool) -> Tensor",
      &xla__th_var);
  register_extension_backend_op(
      Backend::TPU,
      "_th_var(Tensor, bool) -> Tensor",
      &xla__th_var_1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_std_out(Tensor, Tensor, int64_t, bool, bool) -> Tensor",
      &xla__th_std_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_std(Tensor, int64_t, bool, bool) -> Tensor",
      &xla__th_std);
  register_extension_backend_op(
      Backend::TPU,
      "_th_std(Tensor, bool) -> Tensor",
      &xla__th_std_1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_renorm_out(Tensor, Tensor, Scalar, int64_t, Scalar) -> Tensor",
      &xla__th_renorm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_renorm(Tensor, Scalar, int64_t, Scalar) -> Tensor",
      &xla__th_renorm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_renorm_(Tensor, Scalar, int64_t, Scalar) -> Tensor",
      &xla__th_renorm_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_dist(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_dist);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_dist(Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_dist);
  register_extension_backend_op(
      Backend::TPU,
      "_th_reciprocal_out(Tensor, Tensor) -> Tensor",
      &xla__th_reciprocal_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_reciprocal(Tensor) -> Tensor",
      &xla__th_reciprocal);
  register_extension_backend_op(
      Backend::TPU,
      "_th_reciprocal_(Tensor) -> Tensor",
      &xla__th_reciprocal_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_neg_out(Tensor, Tensor) -> Tensor",
      &xla__th_neg_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_neg(Tensor) -> Tensor",
      &xla__th_neg);
  register_extension_backend_op(
      Backend::TPU,
      "_th_neg_(Tensor) -> Tensor",
      &xla__th_neg_);
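  // Binary pointwise ops: atan2, pow, lerp, and histc. The s_-prefixed
  // schemas appear to be the non-broadcasting ("same-size") variants, with
  // broadcasting handled by the caller.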
  register_extension_backend_op(
      Backend::TPU,
      "_th_atan2_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_atan2_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_atan2_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_s__th_atan2_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_atan2(Tensor, Tensor) -> Tensor",
      &xla__th_atan2);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_atan2(Tensor, Tensor) -> Tensor",
      &xla_s__th_atan2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_atan2_(Tensor, Tensor) -> Tensor",
      &xla__th_atan2_);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_atan2_(Tensor, Tensor) -> Tensor",
      &xla_s__th_atan2_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_pow_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow(Tensor, Scalar) -> Tensor",
      &xla__th_pow);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_pow_out_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_pow_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_s__th_pow_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow(Tensor, Tensor) -> Tensor",
      &xla__th_pow_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_pow(Tensor, Tensor) -> Tensor",
      &xla_s__th_pow);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow_out(Tensor, Scalar, Tensor) -> Tensor",
      &xla__th_pow_out_2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow(Scalar, Tensor) -> Tensor",
      &xla__th_pow_2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow_(Tensor, Scalar) -> Tensor",
      &xla__th_pow_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pow_(Tensor, Tensor) -> Tensor",
      &xla__th_pow__1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_pow_(Tensor, Tensor) -> Tensor",
      &xla_s__th_pow_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lerp_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_lerp_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_lerp_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_lerp_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lerp(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_lerp);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_lerp(Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_lerp);
  register_extension_backend_op(
      Backend::TPU,
      "_th_lerp_(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_lerp_);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_lerp_(Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_lerp_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_histc_out(Tensor, Tensor, int64_t, Scalar, Scalar) -> Tensor",
      &xla__th_histc_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_histc(Tensor, int64_t, Scalar, Scalar) -> Tensor",
      &xla__th_histc);
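  // Fill and scan ops: zero_, cumsum/cumprod, sign, and trace.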
  register_extension_backend_op(
      Backend::TPU,
      "_th_zero_(Tensor) -> Tensor",
      &xla__th_zero_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cumsum_out(Tensor, Tensor, int64_t) -> Tensor",
      &xla__th_cumsum_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cumsum(Tensor, int64_t) -> Tensor",
      &xla__th_cumsum);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cumprod_out(Tensor, Tensor, int64_t) -> Tensor",
      &xla__th_cumprod_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cumprod(Tensor, int64_t) -> Tensor",
      &xla__th_cumprod);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sign_out(Tensor, Tensor) -> Tensor",
      &xla__th_sign_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sign(Tensor) -> Tensor",
      &xla__th_sign);
  register_extension_backend_op(
      Backend::TPU,
      "_th_sign_(Tensor) -> Tensor",
      &xla__th_sign_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_trace(Tensor) -> Tensor",
      &xla__th_trace);
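  // fmod/remainder (Scalar and Tensor overloads) and the clamp family.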
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_fmod_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod(Tensor, Scalar) -> Tensor",
      &xla__th_fmod);
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_fmod_out_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_fmod_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_s__th_fmod_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod(Tensor, Tensor) -> Tensor",
      &xla__th_fmod_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_fmod(Tensor, Tensor) -> Tensor",
      &xla_s__th_fmod);
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod_(Tensor, Scalar) -> Tensor",
      &xla__th_fmod_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_fmod_(Tensor, Tensor) -> Tensor",
      &xla__th_fmod__1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_fmod_(Tensor, Tensor) -> Tensor",
      &xla_s__th_fmod_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_remainder_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder(Tensor, Scalar) -> Tensor",
      &xla__th_remainder);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_remainder_out_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_remainder_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_s__th_remainder_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder(Tensor, Tensor) -> Tensor",
      &xla__th_remainder_1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_remainder(Tensor, Tensor) -> Tensor",
      &xla_s__th_remainder);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder_(Tensor, Scalar) -> Tensor",
      &xla__th_remainder_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_remainder_(Tensor, Tensor) -> Tensor",
      &xla__th_remainder__1);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_remainder_(Tensor, Tensor) -> Tensor",
      &xla_s__th_remainder_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_clamp_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp(Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_clamp);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp_min_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_clamp_min_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp_min(Tensor, Scalar) -> Tensor",
      &xla__th_clamp_min);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp_max_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_clamp_max_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_clamp_max(Tensor, Scalar) -> Tensor",
      &xla__th_clamp_max);
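  // BLAS-style linear algebra: dot, cross, diag, and the addmm/addmv/addr,
  // ger/mv/mm/bmm, addbmm/baddbmm, addcmul/addcdiv families.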
  register_extension_backend_op(
      Backend::TPU,
      "_th_dot(Tensor, Tensor) -> Tensor",
      &xla__th_dot);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cross_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__th_cross_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cross(Tensor, Tensor, int64_t) -> Tensor",
      &xla__th_cross);
  register_extension_backend_op(
      Backend::TPU,
      "_th_diag_out(Tensor, Tensor, int64_t) -> Tensor",
      &xla__th_diag_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_diag(Tensor, int64_t) -> Tensor",
      &xla__th_diag);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmm);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addmm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmm_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmv_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmv_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addmv_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addmv_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmv(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmv);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addmv(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addmv);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addmv_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addmv_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addr_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addr_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addr_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addr_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addr(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addr);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addr(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addr);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addr_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addr_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ger_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_ger_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ger(Tensor, Tensor) -> Tensor",
      &xla__th_ger);
  register_extension_backend_op(
      Backend::TPU,
      "_th_mv_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_mv_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_mv(Tensor, Tensor) -> Tensor",
      &xla__th_mv);
  register_extension_backend_op(
      Backend::TPU,
      "_th_mm_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_mm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_mm(Tensor, Tensor) -> Tensor",
      &xla__th_mm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_bmm_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_bmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_bmm(Tensor, Tensor) -> Tensor",
      &xla__th_bmm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addbmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addbmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addbmm);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_addbmm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addbmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_addbmm_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_baddbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_baddbmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_baddbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_baddbmm_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_baddbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__th_baddbmm);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_baddbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_s__th_baddbmm);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcmul_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcmul_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcmul_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcmul_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcmul(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcmul);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcmul(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcmul);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcmul_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcmul_);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcmul_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcmul_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcdiv_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcdiv_out);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcdiv_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcdiv_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcdiv(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcdiv);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcdiv(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcdiv);
  register_extension_backend_op(
      Backend::TPU,
      "_th_addcdiv_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__th_addcdiv_);
  register_extension_backend_op(
      Backend::TPU,
      "s__th_addcdiv_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_s__th_addcdiv_);
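  // LAPACK-style solvers and factorizations: gels, trtrs, symeig/eig, svd,
  // matrix inverse (getri), Cholesky-related (potri, pstrf), QR (qr, geqrf,
  // orgqr, ormqr), and batched LU (btrifact, btrisolve).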
  register_extension_backend_op(
      Backend::TPU,
      "_th_gels_out(Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_gels_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_gels(Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_gels);
  register_extension_backend_op(
      Backend::TPU,
      "_th_trtrs_out(Tensor, Tensor, Tensor, Tensor, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_trtrs_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_trtrs(Tensor, Tensor, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_trtrs);
  register_extension_backend_op(
      Backend::TPU,
      "_th_symeig_out(Tensor, Tensor, Tensor, bool, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_symeig_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_symeig(Tensor, bool, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_symeig);
  register_extension_backend_op(
      Backend::TPU,
      "_th_eig_out(Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_eig_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_eig(Tensor, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_eig);
  register_extension_backend_op(
      Backend::TPU,
      "_th_svd_out(Tensor, Tensor, Tensor, Tensor, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla__th_svd_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_svd(Tensor, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla__th_svd);
  register_extension_backend_op(
      Backend::TPU,
      "_th_getri_single_out(Tensor, Tensor) -> Tensor",
      &xla__th_getri_single_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_getri_single(Tensor) -> Tensor",
      &xla__th_getri_single);
  register_extension_backend_op(
      Backend::TPU,
      "_th_potri_out(Tensor, Tensor, bool) -> Tensor",
      &xla__th_potri_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_potri(Tensor, bool) -> Tensor",
      &xla__th_potri);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pstrf_out(Tensor, Tensor, Tensor, bool, Scalar) -> std::tuple<Tensor,Tensor>",
      &xla__th_pstrf_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_pstrf(Tensor, bool, Scalar) -> std::tuple<Tensor,Tensor>",
      &xla__th_pstrf);
  register_extension_backend_op(
      Backend::TPU,
      "_th_qr_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_qr_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_qr(Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_qr);
  register_extension_backend_op(
      Backend::TPU,
      "_th_geqrf_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_geqrf_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_geqrf(Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__th_geqrf);
  register_extension_backend_op(
      Backend::TPU,
      "_th_orgqr_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_orgqr_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_orgqr(Tensor, Tensor) -> Tensor",
      &xla__th_orgqr);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ormqr_out(Tensor, Tensor, Tensor, Tensor, bool, bool) -> Tensor",
      &xla__th_ormqr_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_ormqr(Tensor, Tensor, Tensor, bool, bool) -> Tensor",
      &xla__th_ormqr);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrifact_out(Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_btrifact_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrifact(Tensor, bool) -> std::tuple<Tensor,Tensor>",
      &xla__th_btrifact);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrifact_with_info_out(Tensor, Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla__th_btrifact_with_info_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrifact_with_info(Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla__th_btrifact_with_info);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrisolve_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_btrisolve_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_btrisolve(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_btrisolve);
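  // In-place random fills and sampling; each overload threads a Generator.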
  register_extension_backend_op(
      Backend::TPU,
      "_th_random_(Tensor, int64_t, int64_t, Generator) -> Tensor",
      &xla__th_random_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_random_(Tensor, int64_t, Generator) -> Tensor",
      &xla__th_random__1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_random_(Tensor, Generator) -> Tensor",
      &xla__th_random__2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_multinomial_out(Tensor, Tensor, int64_t, bool, Generator) -> Tensor",
      &xla__th_multinomial_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_multinomial(Tensor, int64_t, bool, Generator) -> Tensor",
      &xla__th_multinomial);
  register_extension_backend_op(
      Backend::TPU,
      "_th_uniform_(Tensor, double, double, Generator) -> Tensor",
      &xla__th_uniform_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal_out(Tensor, Tensor, double, Generator) -> Tensor",
      &xla__th_normal_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal(Tensor, double, Generator) -> Tensor",
      &xla__th_normal);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal_out(Tensor, double, Tensor, Generator) -> Tensor",
      &xla__th_normal_out_1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal(double, Tensor, Generator) -> Tensor",
      &xla__th_normal_1);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal_out(Tensor, Tensor, Tensor, Generator) -> Tensor",
      &xla__th_normal_out_2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal(Tensor, Tensor, Generator) -> Tensor",
      &xla__th_normal_2);
  register_extension_backend_op(
      Backend::TPU,
      "_th_normal_(Tensor, double, double, Generator) -> Tensor",
      &xla__th_normal_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cauchy_(Tensor, double, double, Generator) -> Tensor",
      &xla__th_cauchy_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_log_normal_(Tensor, double, double, Generator) -> Tensor",
      &xla__th_log_normal_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_exponential_(Tensor, double, Generator) -> Tensor",
      &xla__th_exponential_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_geometric_(Tensor, double, Generator) -> Tensor",
      &xla__th_geometric_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_dirichlet_grad_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_dirichlet_grad_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_dirichlet_grad(Tensor, Tensor, Tensor) -> Tensor",
      &xla__th_dirichlet_grad);
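  // Tensor plumbing: alias, overlap-safe copy, and concatenation.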
  register_extension_backend_op(
      Backend::TPU,
      "_th_alias(Tensor) -> Tensor",
      &xla__th_alias);
  register_extension_backend_op(
      Backend::TPU,
      "_th_copy_ignoring_overlaps_(Tensor, Tensor) -> Tensor",
      &xla__th_copy_ignoring_overlaps_);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cat_out(Tensor, TensorList, int64_t) -> Tensor",
      &xla__th_cat_out);
  register_extension_backend_op(
      Backend::TPU,
      "_th_cat(TensorList, int64_t) -> Tensor",
      &xla__th_cat);
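  // THNN loss functions, each as a forward/backward pair with _out variants.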
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_binary_cross_entropy_forward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_binary_cross_entropy_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_binary_cross_entropy_forward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_binary_cross_entropy_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_binary_cross_entropy_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_binary_cross_entropy_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_binary_cross_entropy_backward(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_binary_cross_entropy_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_l1_loss_forward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_l1_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_l1_loss_forward(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_l1_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_l1_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_l1_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_l1_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_l1_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_mse_loss_forward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_mse_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_mse_loss_forward(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_mse_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_mse_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_mse_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_mse_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_mse_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multi_margin_loss_forward_out(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla__thnn_multi_margin_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multi_margin_loss_forward(Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla__thnn_multi_margin_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multi_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla__thnn_multi_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multi_margin_loss_backward(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla__thnn_multi_margin_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multilabel_margin_loss_forward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_multilabel_margin_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multilabel_margin_loss_forward(Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_multilabel_margin_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multilabel_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t, Tensor) -> Tensor",
      &xla__thnn_multilabel_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_multilabel_margin_loss_backward(Tensor, Tensor, Tensor, int64_t, Tensor) -> Tensor",
      &xla__thnn_multilabel_margin_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_nll_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss_forward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_nll_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla__thnn_nll_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss_backward(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla__thnn_nll_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_nll_loss2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss2d_forward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_nll_loss2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla__thnn_nll_loss2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_nll_loss2d_backward(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla__thnn_nll_loss2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_smooth_l1_loss_forward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_smooth_l1_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_smooth_l1_loss_forward(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_smooth_l1_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_smooth_l1_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_smooth_l1_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_smooth_l1_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_smooth_l1_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_soft_margin_loss_forward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_soft_margin_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_soft_margin_loss_forward(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_soft_margin_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_soft_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_soft_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_soft_margin_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_soft_margin_loss_backward);
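  // THNN activation functions (forward, backward, and in-place entry points).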
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_forward_out(Tensor, Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla__thnn_elu_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_forward(Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla__thnn_elu_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_backward_out(Tensor, Tensor, Scalar, Scalar, Scalar, Tensor) -> Tensor",
      &xla__thnn_elu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_backward(Tensor, Scalar, Scalar, Scalar, Tensor) -> Tensor",
      &xla__thnn_elu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_(Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla__thnn_elu_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_elu_forward_(Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla__thnn_elu_forward_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_glu_forward_out(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_glu_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_glu_forward(Tensor, int64_t) -> Tensor",
      &xla__thnn_glu_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_glu_backward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_glu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_glu_backward(Tensor, Tensor, int64_t) -> Tensor",
      &xla__thnn_glu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_forward_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_forward(Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_backward_out(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_backward(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_(Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_hardtanh_forward_(Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_hardtanh_forward_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_forward_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_forward(Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_backward_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_backward(Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_(Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_leaky_relu_forward_(Tensor, Scalar) -> Tensor",
      &xla__thnn_leaky_relu_forward_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_log_sigmoid_forward_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_log_sigmoid_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_log_sigmoid_forward(Tensor) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_log_sigmoid_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_log_sigmoid_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_log_sigmoid_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_log_sigmoid_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_log_sigmoid_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_forward_out(Tensor, Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla__thnn_rrelu_with_noise_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_forward(Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla__thnn_rrelu_with_noise_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_backward_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, bool) -> Tensor",
      &xla__thnn_rrelu_with_noise_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_backward(Tensor, Tensor, Tensor, Scalar, Scalar, bool) -> Tensor",
      &xla__thnn_rrelu_with_noise_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_(Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla__thnn_rrelu_with_noise_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_rrelu_with_noise_forward_(Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla__thnn_rrelu_with_noise_forward_);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softplus_forward_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_softplus_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softplus_forward(Tensor, Scalar, Scalar) -> Tensor",
      &xla__thnn_softplus_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softplus_backward_out(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor) -> Tensor",
      &xla__thnn_softplus_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softplus_backward(Tensor, Tensor, Scalar, Scalar, Tensor) -> Tensor",
      &xla__thnn_softplus_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softshrink_forward_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_softshrink_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softshrink_forward(Tensor, Scalar) -> Tensor",
      &xla__thnn_softshrink_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softshrink_backward_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_softshrink_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_softshrink_backward(Tensor, Tensor, Scalar) -> Tensor",
      &xla__thnn_softshrink_backward);
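  // THNN pooling: adaptive, average, max-with-indices, and max-unpooling.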
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_avg_pool3d_forward_out(Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_adaptive_avg_pool3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_avg_pool3d_forward(Tensor, IntList) -> Tensor",
      &xla__thnn_adaptive_avg_pool3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_avg_pool3d_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_avg_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_avg_pool3d_backward(Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_avg_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool2d_forward_out(Tensor, Tensor, Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_adaptive_max_pool2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool2d_forward(Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_adaptive_max_pool2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool2d_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_max_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool2d_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_max_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool3d_forward_out(Tensor, Tensor, Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_adaptive_max_pool3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool3d_forward(Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_adaptive_max_pool3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool3d_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_max_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_adaptive_max_pool3d_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_adaptive_max_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool2d_forward_out(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool2d_forward(Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool2d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool2d_backward(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool3d_forward_out(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool3d_forward(Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool3d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_avg_pool3d_backward(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla__thnn_avg_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool2d_with_indices_forward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_max_pool2d_with_indices_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool2d_with_indices_forward(Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_max_pool2d_with_indices_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool2d_with_indices_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla__thnn_max_pool2d_with_indices_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool2d_with_indices_backward(Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla__thnn_max_pool2d_with_indices_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool3d_with_indices_forward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_max_pool3d_with_indices_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool3d_with_indices_forward(Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla__thnn_max_pool3d_with_indices_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool3d_with_indices_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla__thnn_max_pool3d_with_indices_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_pool3d_with_indices_backward(Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla__thnn_max_pool3d_with_indices_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool2d_forward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_max_unpool2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool2d_forward(Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_max_unpool2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool2d_backward_out(Tensor, Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_max_unpool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool2d_backward(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_max_unpool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool3d_forward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla__thnn_max_unpool3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool3d_forward(Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla__thnn_max_unpool3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool3d_backward_out(Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla__thnn_max_unpool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_max_unpool3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla__thnn_max_unpool3d_backward);
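  // THNN upsampling: linear1d, bilinear2d, bicubic2d, trilinear3d, and the
  // nearest-neighbor 1d/2d/3d variants.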
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_linear1d_forward_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_linear1d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_linear1d_forward(Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_linear1d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_linear1d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_linear1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_linear1d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_linear1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bilinear2d_forward_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bilinear2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bilinear2d_forward(Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bilinear2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bilinear2d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bilinear2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bilinear2d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bilinear2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bicubic2d_forward_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bicubic2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bicubic2d_forward(Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bicubic2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bicubic2d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bicubic2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_bicubic2d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_bicubic2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_trilinear3d_forward_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_trilinear3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_trilinear3d_forward(Tensor, IntList, bool) -> Tensor",
      &xla__thnn_upsample_trilinear3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_trilinear3d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_trilinear3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_trilinear3d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla__thnn_upsample_trilinear3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest1d_forward_out(Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest1d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest1d_forward(Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest1d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest1d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest1d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest2d_forward_out(Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest2d_forward(Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest2d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest2d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest3d_forward_out(Tensor, Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest3d_forward(Tensor, IntList) -> Tensor",
      &xla__thnn_upsample_nearest3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest3d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_upsample_nearest3d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla__thnn_upsample_nearest3d_backward);
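  // THNN sigmoid/tanh forward-backward pairs.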
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_sigmoid_forward_out(Tensor, Tensor) -> Tensor",
      &xla__thnn_sigmoid_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_sigmoid_forward(Tensor) -> Tensor",
      &xla__thnn_sigmoid_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_sigmoid_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_sigmoid_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_sigmoid_backward(Tensor, Tensor) -> Tensor",
      &xla__thnn_sigmoid_backward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_tanh_forward_out(Tensor, Tensor) -> Tensor",
      &xla__thnn_tanh_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_tanh_forward(Tensor) -> Tensor",
      &xla__thnn_tanh_forward);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_tanh_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla__thnn_tanh_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "_thnn_tanh_backward(Tensor, Tensor) -> Tensor",
      &xla__thnn_tanh_backward);
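  // THNN convolutions: transposed, plain, depthwise, and dilated variants.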
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose2d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose2d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose2d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose2d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose3d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose3d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose3d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_transpose3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_transpose3d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv2d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv2d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv2d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv2d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_depthwise2d_forward_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_conv_depthwise2d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_depthwise2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_conv_depthwise2d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_depthwise2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor>",
    &xla__thnn_conv_depthwise2d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_depthwise2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, std::array<bool,2>) -> std::tuple<Tensor,Tensor>",
    &xla__thnn_conv_depthwise2d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv3d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv3d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv3d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv3d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated2d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated2d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated2d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated2d_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated3d_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated3d_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated3d_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_conv_dilated3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_conv_dilated3d_backward);
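// col2im/im2col: the fold/unfold primitives underlying the THNN
// convolutions above, in forward, backward, and _out variants.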
register_extension_backend_op(
    Backend::TPU,
    "_thnn_col2im_forward_out(Tensor, Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_col2im_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_col2im_forward(Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_col2im_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_col2im_backward_out(Tensor, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_col2im_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_col2im_backward(Tensor, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_col2im_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_im2col_forward_out(Tensor, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_im2col_forward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_im2col_forward(Tensor, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_im2col_forward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_im2col_backward_out(Tensor, Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_im2col_backward_out);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_im2col_backward(Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
    &xla__thnn_im2col_backward);
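// Dtype conversion entry points. The trailing bool is presumably the
// non_blocking flag of at::_cast_*(self, non_blocking).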
register_extension_backend_op(
    Backend::TPU,
    "_cast_Byte(Tensor, bool) -> Tensor",
    &xla__cast_Byte);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Char(Tensor, bool) -> Tensor",
    &xla__cast_Char);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Double(Tensor, bool) -> Tensor",
    &xla__cast_Double);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Float(Tensor, bool) -> Tensor",
    &xla__cast_Float);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Int(Tensor, bool) -> Tensor",
    &xla__cast_Int);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Long(Tensor, bool) -> Tensor",
    &xla__cast_Long);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Short(Tensor, bool) -> Tensor",
    &xla__cast_Short);
register_extension_backend_op(
    Backend::TPU,
    "_cast_Half(Tensor, bool) -> Tensor",
    &xla__cast_Half);
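// Dropout family. _fused_dropout returns the output together with the
// sampled mask, and _masked_scale reapplies a saved mask with a scale;
// the functional/in-place dropout variants below build on these.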
register_extension_backend_op(
    Backend::TPU,
    "_fused_dropout(Tensor, double, Generator) -> std::tuple<Tensor,Tensor>",
    &xla__fused_dropout);
register_extension_backend_op(
    Backend::TPU,
    "_masked_scale(Tensor, Tensor, double) -> Tensor",
    &xla__masked_scale);
register_extension_backend_op(
    Backend::TPU,
    "_reshape_from_tensor(Tensor, Tensor) -> Tensor",
    &xla__reshape_from_tensor);
register_extension_backend_op(
    Backend::TPU,
    "_shape_as_tensor(Tensor) -> Tensor",
    &xla__shape_as_tensor);
register_extension_backend_op(
    Backend::TPU,
    "dropout(Tensor, double, bool) -> Tensor",
    &xla_dropout);
register_extension_backend_op(
    Backend::TPU,
    "dropout_(Tensor, double, bool) -> Tensor",
    &xla_dropout_);
register_extension_backend_op(
    Backend::TPU,
    "feature_dropout(Tensor, double, bool) -> Tensor",
    &xla_feature_dropout);
register_extension_backend_op(
    Backend::TPU,
    "feature_dropout_(Tensor, double, bool) -> Tensor",
    &xla_feature_dropout_);
register_extension_backend_op(
    Backend::TPU,
    "alpha_dropout(Tensor, double, bool) -> Tensor",
    &xla_alpha_dropout);
register_extension_backend_op(
    Backend::TPU,
    "alpha_dropout_(Tensor, double, bool) -> Tensor",
    &xla_alpha_dropout_);
register_extension_backend_op(
    Backend::TPU,
    "feature_alpha_dropout(Tensor, double, bool) -> Tensor",
    &xla_feature_alpha_dropout);
register_extension_backend_op(
    Backend::TPU,
    "feature_alpha_dropout_(Tensor, double, bool) -> Tensor",
    &xla_feature_alpha_dropout_);
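// From here on the table walks the general ATen surface (pointwise math,
// reductions, shape ops, factories, ...). Rough dispatch sketch, an
// assumption rather than something this file spells out: once these
// registrations have run, a call such as
//   at::abs(tpu_tensor);
// should route through Backend::TPU to the xla_abs wrapper below.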
register_extension_backend_op(
    Backend::TPU,
    "abs(Tensor) -> Tensor",
    &xla_abs);
register_extension_backend_op(
    Backend::TPU,
    "abs_(Tensor) -> Tensor",
    &xla_abs_);
register_extension_backend_op(
    Backend::TPU,
    "abs_out(Tensor, Tensor) -> Tensor",
    &xla_abs_out);
register_extension_backend_op(
    Backend::TPU,
    "acos(Tensor) -> Tensor",
    &xla_acos);
register_extension_backend_op(
    Backend::TPU,
    "acos_(Tensor) -> Tensor",
    &xla_acos_);
register_extension_backend_op(
    Backend::TPU,
    "acos_out(Tensor, Tensor) -> Tensor",
    &xla_acos_out);
register_extension_backend_op(
    Backend::TPU,
    "avg_pool1d(Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
    &xla_avg_pool1d);
register_extension_backend_op(
    Backend::TPU,
    "adaptive_avg_pool1d(Tensor, IntList) -> Tensor",
    &xla_adaptive_avg_pool1d);
register_extension_backend_op(
    Backend::TPU,
    "adaptive_max_pool1d(Tensor, IntList) -> std::tuple<Tensor,Tensor>",
    &xla_adaptive_max_pool1d);
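// Binary arithmetic. In the Tensor/Tensor overloads the trailing Scalar
// is the usual ATen alpha, i.e. add(self, other, alpha) computes
// self + alpha * other.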
register_extension_backend_op(
    Backend::TPU,
    "add(Tensor, Tensor, Scalar) -> Tensor",
    &xla_add);
register_extension_backend_op(
    Backend::TPU,
    "add_(Tensor, Tensor, Scalar) -> Tensor",
    &xla_add_);
register_extension_backend_op(
    Backend::TPU,
    "add_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_add_out);
register_extension_backend_op(
    Backend::TPU,
    "add(Tensor, Scalar, Scalar) -> Tensor",
    &xla_add_1);
register_extension_backend_op(
    Backend::TPU,
    "add_(Tensor, Scalar, Scalar) -> Tensor",
    &xla_add__1);
register_extension_backend_op(
    Backend::TPU,
    "addmv(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmv);
register_extension_backend_op(
    Backend::TPU,
    "addmv_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmv_);
register_extension_backend_op(
    Backend::TPU,
    "addmv_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmv_out);
register_extension_backend_op(
    Backend::TPU,
    "addr(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addr);
register_extension_backend_op(
    Backend::TPU,
    "addr_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addr_);
register_extension_backend_op(
    Backend::TPU,
    "addr_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addr_out);
register_extension_backend_op(
    Backend::TPU,
    "affine_grid_generator(Tensor, IntList) -> Tensor",
    &xla_affine_grid_generator);
register_extension_backend_op(
    Backend::TPU,
    "affine_grid_generator_backward(Tensor, IntList) -> Tensor",
    &xla_affine_grid_generator_backward);
register_extension_backend_op(
    Backend::TPU,
    "all(Tensor, int64_t, bool) -> Tensor",
    &xla_all);
register_extension_backend_op(
    Backend::TPU,
    "all_out(Tensor, Tensor, int64_t, bool) -> Tensor",
    &xla_all_out);
register_extension_backend_op(
    Backend::TPU,
    "allclose(Tensor, Tensor, double, double, bool) -> bool",
    &xla_allclose);
register_extension_backend_op(
    Backend::TPU,
    "any(Tensor, int64_t, bool) -> Tensor",
    &xla_any);
register_extension_backend_op(
    Backend::TPU,
    "any_out(Tensor, Tensor, int64_t, bool) -> Tensor",
    &xla_any_out);
register_extension_backend_op(
    Backend::TPU,
    "arange_out(Tensor, Scalar) -> Tensor",
    &xla_arange_out);
register_extension_backend_op(
    Backend::TPU,
    "arange_out(Tensor, Scalar, Scalar, Scalar) -> Tensor",
    &xla_arange_out_1);
register_extension_backend_op(
    Backend::TPU,
    "_dim_arange(Tensor, int64_t) -> Tensor",
    &xla__dim_arange);
register_extension_backend_op(
    Backend::TPU,
    "argmax(Tensor, int64_t, bool) -> Tensor",
    &xla_argmax);
register_extension_backend_op(
    Backend::TPU,
    "argmax(Tensor) -> Tensor",
    &xla_argmax_1);
register_extension_backend_op(
    Backend::TPU,
    "_argmax(Tensor, int64_t, bool) -> Tensor",
    &xla__argmax);
register_extension_backend_op(
    Backend::TPU,
    "argmin(Tensor, int64_t, bool) -> Tensor",
    &xla_argmin);
register_extension_backend_op(
    Backend::TPU,
    "argmin(Tensor) -> Tensor",
    &xla_argmin_1);
register_extension_backend_op(
    Backend::TPU,
    "_argmin(Tensor, int64_t, bool) -> Tensor",
    &xla__argmin);
register_extension_backend_op(
    Backend::TPU,
    "as_strided(Tensor, IntList, IntList, c10::optional<int64_t>) -> Tensor",
    &xla_as_strided);
register_extension_backend_op(
    Backend::TPU,
    "as_strided_(Tensor, IntList, IntList, c10::optional<int64_t>) -> Tensor",
    &xla_as_strided_);
register_extension_backend_op(
    Backend::TPU,
    "asin(Tensor) -> Tensor",
    &xla_asin);
register_extension_backend_op(
    Backend::TPU,
    "asin_(Tensor) -> Tensor",
    &xla_asin_);
register_extension_backend_op(
    Backend::TPU,
    "asin_out(Tensor, Tensor) -> Tensor",
    &xla_asin_out);
register_extension_backend_op(
    Backend::TPU,
    "atan(Tensor) -> Tensor",
    &xla_atan);
register_extension_backend_op(
    Backend::TPU,
    "atan_(Tensor) -> Tensor",
    &xla_atan_);
register_extension_backend_op(
    Backend::TPU,
    "atan_out(Tensor, Tensor) -> Tensor",
    &xla_atan_out);
register_extension_backend_op(
    Backend::TPU,
    "baddbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_baddbmm);
register_extension_backend_op(
    Backend::TPU,
    "baddbmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_baddbmm_);
register_extension_backend_op(
    Backend::TPU,
    "_baddbmm_mkl_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla__baddbmm_mkl_);
register_extension_backend_op(
    Backend::TPU,
    "baddbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_baddbmm_out);
register_extension_backend_op(
    Backend::TPU,
    "batch_norm(Tensor, Tensor, Tensor, Tensor, Tensor, bool, double, double, bool) -> Tensor",
    &xla_batch_norm);
register_extension_backend_op(
    Backend::TPU,
    "bernoulli(Tensor, Generator) -> Tensor",
    &xla_bernoulli);
register_extension_backend_op(
    Backend::TPU,
    "bernoulli_out(Tensor, Tensor, Generator) -> Tensor",
    &xla_bernoulli_out);
register_extension_backend_op(
    Backend::TPU,
    "bernoulli_(Tensor, Tensor, Generator) -> Tensor",
    &xla_bernoulli_);
register_extension_backend_op(
    Backend::TPU,
    "bernoulli_(Tensor, double, Generator) -> Tensor",
    &xla_bernoulli__1);
register_extension_backend_op(
    Backend::TPU,
    "bernoulli(Tensor, double, Generator) -> Tensor",
    &xla_bernoulli_1);
register_extension_backend_op(
    Backend::TPU,
    "bilinear(Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla_bilinear);
register_extension_backend_op(
    Backend::TPU,
    "binary_cross_entropy_with_logits(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
    &xla_binary_cross_entropy_with_logits);
register_extension_backend_op(
    Backend::TPU,
    "binary_cross_entropy_with_logits_backward(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
    &xla_binary_cross_entropy_with_logits_backward);
register_extension_backend_op(
    Backend::TPU,
    "bincount(Tensor, Tensor, int64_t) -> Tensor",
    &xla_bincount);
register_extension_backend_op(
    Backend::TPU,
    "bmm(Tensor, Tensor) -> Tensor",
    &xla_bmm);
register_extension_backend_op(
    Backend::TPU,
    "bmm_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_bmm_out);
register_extension_backend_op(
    Backend::TPU,
    "broadcast_tensors(TensorList) -> std::vector<Tensor>",
    &xla_broadcast_tensors);
register_extension_backend_op(
    Backend::TPU,
    "cat(TensorList, int64_t) -> Tensor",
    &xla_cat);
register_extension_backend_op(
    Backend::TPU,
    "cat_out(Tensor, TensorList, int64_t) -> Tensor",
    &xla_cat_out);
register_extension_backend_op(
    Backend::TPU,
    "ceil(Tensor) -> Tensor",
    &xla_ceil);
register_extension_backend_op(
    Backend::TPU,
    "ceil_(Tensor) -> Tensor",
    &xla_ceil_);
register_extension_backend_op(
    Backend::TPU,
    "ceil_out(Tensor, Tensor) -> Tensor",
    &xla_ceil_out);
register_extension_backend_op(
    Backend::TPU,
    "chain_matmul(TensorList) -> Tensor",
    &xla_chain_matmul);
register_extension_backend_op(
    Backend::TPU,
    "chunk(Tensor, int64_t, int64_t) -> std::vector<Tensor>",
    &xla_chunk);
register_extension_backend_op(
    Backend::TPU,
    "clamp(Tensor, c10::optional<Scalar>, c10::optional<Scalar>) -> Tensor",
    &xla_clamp);
register_extension_backend_op(
    Backend::TPU,
    "clamp_(Tensor, c10::optional<Scalar>, c10::optional<Scalar>) -> Tensor",
    &xla_clamp_);
register_extension_backend_op(
    Backend::TPU,
    "clamp_out(Tensor, Tensor, c10::optional<Scalar>, c10::optional<Scalar>) -> Tensor",
    &xla_clamp_out);
register_extension_backend_op(
    Backend::TPU,
    "clamp_max(Tensor, Scalar) -> Tensor",
    &xla_clamp_max);
register_extension_backend_op(
    Backend::TPU,
    "clamp_max_(Tensor, Scalar) -> Tensor",
    &xla_clamp_max_);
register_extension_backend_op(
    Backend::TPU,
    "clamp_max_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_clamp_max_out);
register_extension_backend_op(
    Backend::TPU,
    "clamp_min(Tensor, Scalar) -> Tensor",
    &xla_clamp_min);
register_extension_backend_op(
    Backend::TPU,
    "clamp_min_(Tensor, Scalar) -> Tensor",
    &xla_clamp_min_);
register_extension_backend_op(
    Backend::TPU,
    "clamp_min_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_clamp_min_out);
register_extension_backend_op(
    Backend::TPU,
    "constant_pad_nd(Tensor, IntList, Scalar) -> Tensor",
    &xla_constant_pad_nd);
register_extension_backend_op(
    Backend::TPU,
    "contiguous(Tensor) -> Tensor",
    &xla_contiguous);
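// Convolution front ends (convolution/_convolution/conv1d..conv3d,
// conv_transposeNd, conv_tbc); in stock ATen these presumably lower onto
// the THNN kernels registered earlier.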
register_extension_backend_op(
    Backend::TPU,
    "convolution(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, IntList, int64_t) -> Tensor",
    &xla_convolution);
register_extension_backend_op(
    Backend::TPU,
    "_convolution(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, IntList, int64_t, bool, bool, bool) -> Tensor",
    &xla__convolution);
register_extension_backend_op(
    Backend::TPU,
    "_convolution_nogroup(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, IntList) -> Tensor",
    &xla__convolution_nogroup);
register_extension_backend_op(
    Backend::TPU,
    "_convolution_double_backward(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, IntList, int64_t, bool, bool, bool, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__convolution_double_backward);
register_extension_backend_op(
    Backend::TPU,
    "conv1d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t) -> Tensor",
    &xla_conv1d);
register_extension_backend_op(
    Backend::TPU,
    "conv2d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t) -> Tensor",
    &xla_conv2d);
register_extension_backend_op(
    Backend::TPU,
    "conv3d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t) -> Tensor",
    &xla_conv3d);
register_extension_backend_op(
    Backend::TPU,
    "conv_tbc(Tensor, Tensor, Tensor, int64_t) -> Tensor",
    &xla_conv_tbc);
register_extension_backend_op(
    Backend::TPU,
    "conv_tbc_backward(Tensor, Tensor, Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_conv_tbc_backward);
register_extension_backend_op(
    Backend::TPU,
    "conv_transpose1d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, IntList) -> Tensor",
    &xla_conv_transpose1d);
register_extension_backend_op(
    Backend::TPU,
    "conv_transpose2d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, IntList) -> Tensor",
    &xla_conv_transpose2d);
register_extension_backend_op(
    Backend::TPU,
    "conv_transpose3d(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, IntList) -> Tensor",
    &xla_conv_transpose3d);
register_extension_backend_op(
    Backend::TPU,
    "s_copy_(Tensor, Tensor, bool) -> Tensor",
    &xla_s_copy_);
register_extension_backend_op(
    Backend::TPU,
    "_s_copy_from(Tensor, Tensor, bool) -> Tensor",
    &xla__s_copy_from);
register_extension_backend_op(
    Backend::TPU,
    "_copy_same_type_(Tensor, Tensor) -> void",
    &xla__copy_same_type_);
register_extension_backend_op(
    Backend::TPU,
    "cos(Tensor) -> Tensor",
    &xla_cos);
register_extension_backend_op(
    Backend::TPU,
    "cos_(Tensor) -> Tensor",
    &xla_cos_);
register_extension_backend_op(
    Backend::TPU,
    "cos_out(Tensor, Tensor) -> Tensor",
    &xla_cos_out);
register_extension_backend_op(
    Backend::TPU,
    "cosh(Tensor) -> Tensor",
    &xla_cosh);
register_extension_backend_op(
    Backend::TPU,
    "cosh_(Tensor) -> Tensor",
    &xla_cosh_);
register_extension_backend_op(
    Backend::TPU,
    "cosh_out(Tensor, Tensor) -> Tensor",
    &xla_cosh_out);
register_extension_backend_op(
    Backend::TPU,
    "cosine_embedding_loss(Tensor, Tensor, Tensor, double, int64_t) -> Tensor",
    &xla_cosine_embedding_loss);
register_extension_backend_op(
    Backend::TPU,
    "cumsum(Tensor, int64_t, ScalarType) -> Tensor",
    &xla_cumsum);
register_extension_backend_op(
    Backend::TPU,
    "cumsum(Tensor, int64_t) -> Tensor",
    &xla_cumsum_1);
register_extension_backend_op(
    Backend::TPU,
    "cumsum_out(Tensor, Tensor, int64_t, ScalarType) -> Tensor",
    &xla_cumsum_out);
register_extension_backend_op(
    Backend::TPU,
    "cumsum_out(Tensor, Tensor, int64_t) -> Tensor",
    &xla_cumsum_out_1);
register_extension_backend_op(
    Backend::TPU,
    "cumprod(Tensor, int64_t, ScalarType) -> Tensor",
    &xla_cumprod);
register_extension_backend_op(
    Backend::TPU,
    "cumprod(Tensor, int64_t) -> Tensor",
    &xla_cumprod_1);
register_extension_backend_op(
    Backend::TPU,
    "cumprod_out(Tensor, Tensor, int64_t, ScalarType) -> Tensor",
    &xla_cumprod_out);
register_extension_backend_op(
    Backend::TPU,
    "cumprod_out(Tensor, Tensor, int64_t) -> Tensor",
    &xla_cumprod_out_1);
register_extension_backend_op(
    Backend::TPU,
    "ctc_loss(Tensor, Tensor, IntList, IntList, int64_t, int64_t) -> Tensor",
    &xla_ctc_loss);
register_extension_backend_op(
    Backend::TPU,
    "ctc_loss(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> Tensor",
    &xla_ctc_loss_1);
register_extension_backend_op(
    Backend::TPU,
    "_ctc_loss(Tensor, Tensor, IntList, IntList, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla__ctc_loss);
register_extension_backend_op(
    Backend::TPU,
    "_ctc_loss_backward(Tensor, Tensor, Tensor, IntList, IntList, Tensor, Tensor, int64_t) -> Tensor",
    &xla__ctc_loss_backward);
register_extension_backend_op(
    Backend::TPU,
    "det(Tensor) -> Tensor",
    &xla_det);
register_extension_backend_op(
    Backend::TPU,
    "diag_embed(Tensor, int64_t, int64_t, int64_t) -> Tensor",
    &xla_diag_embed);
register_extension_backend_op(
    Backend::TPU,
    "diagflat(Tensor, int64_t) -> Tensor",
    &xla_diagflat);
register_extension_backend_op(
    Backend::TPU,
    "diagonal(Tensor, int64_t, int64_t, int64_t) -> Tensor",
    &xla_diagonal);
register_extension_backend_op(
    Backend::TPU,
    "div(Tensor, Tensor) -> Tensor",
    &xla_div);
register_extension_backend_op(
    Backend::TPU,
    "div_(Tensor, Tensor) -> Tensor",
    &xla_div_);
register_extension_backend_op(
    Backend::TPU,
    "div_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_div_out);
register_extension_backend_op(
    Backend::TPU,
    "div(Tensor, Scalar) -> Tensor",
    &xla_div_1);
register_extension_backend_op(
    Backend::TPU,
    "div_(Tensor, Scalar) -> Tensor",
    &xla_div__1);
register_extension_backend_op(
    Backend::TPU,
    "dot(Tensor, Tensor) -> Tensor",
    &xla_dot);
register_extension_backend_op(
    Backend::TPU,
    "dot_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_dot_out);
register_extension_backend_op(
    Backend::TPU,
    "einsum(std::string, TensorList) -> Tensor",
    &xla_einsum);
register_extension_backend_op(
    Backend::TPU,
    "embedding(Tensor, Tensor, int64_t, bool, bool) -> Tensor",
    &xla_embedding);
register_extension_backend_op(
    Backend::TPU,
    "embedding_backward(Tensor, Tensor, int64_t, int64_t, bool, bool) -> Tensor",
    &xla_embedding_backward);
register_extension_backend_op(
    Backend::TPU,
    "embedding_dense_backward(Tensor, Tensor, int64_t, int64_t, bool) -> Tensor",
    &xla_embedding_dense_backward);
register_extension_backend_op(
    Backend::TPU,
    "embedding_renorm_(Tensor, Tensor, double, double) -> Tensor",
    &xla_embedding_renorm_);
register_extension_backend_op(
    Backend::TPU,
    "embedding_sparse_backward(Tensor, Tensor, int64_t, int64_t, bool) -> Tensor",
    &xla_embedding_sparse_backward);
register_extension_backend_op(
    Backend::TPU,
    "embedding_bag(Tensor, Tensor, Tensor, bool, int64_t, bool) -> std::tuple<Tensor,Tensor,Tensor,Tensor>",
    &xla_embedding_bag);
register_extension_backend_op(
    Backend::TPU,
    "_embedding_bag(Tensor, Tensor, Tensor, bool, int64_t, bool) -> std::tuple<Tensor,Tensor,Tensor,Tensor>",
    &xla__embedding_bag);
register_extension_backend_op(
    Backend::TPU,
    "_embedding_bag_backward(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, bool, int64_t, bool) -> Tensor",
    &xla__embedding_bag_backward);
register_extension_backend_op(
    Backend::TPU,
    "_embedding_bag_sparse_backward(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, bool, int64_t) -> Tensor",
    &xla__embedding_bag_sparse_backward);
register_extension_backend_op(
    Backend::TPU,
    "_embedding_bag_dense_backward(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, bool, int64_t) -> Tensor",
    &xla__embedding_bag_dense_backward);
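// Factory and allocation ops; TensorOptions bundles the dtype, device,
// and layout requested for the new tensor.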
register_extension_backend_op(
    Backend::TPU,
    "empty(IntList, TensorOptions) -> Tensor",
    &xla_empty);
register_extension_backend_op(
    Backend::TPU,
    "resize_(Tensor, IntList) -> Tensor",
    &xla_resize_);
register_extension_backend_op(
    Backend::TPU,
    "empty_out(Tensor, IntList) -> Tensor",
    &xla_empty_out);
register_extension_backend_op(
    Backend::TPU,
    "empty_like(Tensor) -> Tensor",
    &xla_empty_like);
register_extension_backend_op(
    Backend::TPU,
    "empty_strided(IntList, IntList, TensorOptions) -> Tensor",
    &xla_empty_strided);
register_extension_backend_op(
    Backend::TPU,
    "erf(Tensor) -> Tensor",
    &xla_erf);
register_extension_backend_op(
    Backend::TPU,
    "erf_(Tensor) -> Tensor",
    &xla_erf_);
register_extension_backend_op(
    Backend::TPU,
    "erf_out(Tensor, Tensor) -> Tensor",
    &xla_erf_out);
register_extension_backend_op(
    Backend::TPU,
    "erfc(Tensor) -> Tensor",
    &xla_erfc);
register_extension_backend_op(
    Backend::TPU,
    "erfc_(Tensor) -> Tensor",
    &xla_erfc_);
register_extension_backend_op(
    Backend::TPU,
    "erfc_out(Tensor, Tensor) -> Tensor",
    &xla_erfc_out);
register_extension_backend_op(
    Backend::TPU,
    "exp(Tensor) -> Tensor",
    &xla_exp);
register_extension_backend_op(
    Backend::TPU,
    "exp_(Tensor) -> Tensor",
    &xla_exp_);
register_extension_backend_op(
    Backend::TPU,
    "exp_out(Tensor, Tensor) -> Tensor",
    &xla_exp_out);
register_extension_backend_op(
    Backend::TPU,
    "expm1(Tensor) -> Tensor",
    &xla_expm1);
register_extension_backend_op(
    Backend::TPU,
    "expm1_(Tensor) -> Tensor",
    &xla_expm1_);
register_extension_backend_op(
    Backend::TPU,
    "expm1_out(Tensor, Tensor) -> Tensor",
    &xla_expm1_out);
register_extension_backend_op(
    Backend::TPU,
    "expand(Tensor, IntList, bool) -> Tensor",
    &xla_expand);
register_extension_backend_op(
    Backend::TPU,
    "expand_as(Tensor, Tensor) -> Tensor",
    &xla_expand_as);
register_extension_backend_op(
    Backend::TPU,
    "eye_out(Tensor, int64_t) -> Tensor",
    &xla_eye_out);
register_extension_backend_op(
    Backend::TPU,
    "eye_out(Tensor, int64_t, int64_t) -> Tensor",
    &xla_eye_out_1);
register_extension_backend_op(
    Backend::TPU,
    "flatten(Tensor, int64_t, int64_t) -> Tensor",
    &xla_flatten);
register_extension_backend_op(
    Backend::TPU,
    "fill_(Tensor, Scalar) -> Tensor",
    &xla_fill_);
register_extension_backend_op(
    Backend::TPU,
    "fill_(Tensor, Tensor) -> Tensor",
    &xla_fill__1);
register_extension_backend_op(
    Backend::TPU,
    "floor(Tensor) -> Tensor",
    &xla_floor);
register_extension_backend_op(
    Backend::TPU,
    "floor_(Tensor) -> Tensor",
    &xla_floor_);
register_extension_backend_op(
    Backend::TPU,
    "floor_out(Tensor, Tensor) -> Tensor",
    &xla_floor_out);
register_extension_backend_op(
    Backend::TPU,
    "full_out(Tensor, IntList, Scalar) -> Tensor",
    &xla_full_out);
register_extension_backend_op(
    Backend::TPU,
    "full_like(Tensor, Scalar) -> Tensor",
    &xla_full_like);
register_extension_backend_op(
    Backend::TPU,
    "grid_sampler(Tensor, Tensor, int64_t, int64_t) -> Tensor",
    &xla_grid_sampler);
register_extension_backend_op(
    Backend::TPU,
    "grid_sampler_2d(Tensor, Tensor, int64_t, int64_t) -> Tensor",
    &xla_grid_sampler_2d);
register_extension_backend_op(
    Backend::TPU,
    "grid_sampler_2d_backward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla_grid_sampler_2d_backward);
register_extension_backend_op(
    Backend::TPU,
    "grid_sampler_3d(Tensor, Tensor, int64_t, int64_t) -> Tensor",
    &xla_grid_sampler_3d);
register_extension_backend_op(
    Backend::TPU,
    "grid_sampler_3d_backward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla_grid_sampler_3d_backward);
register_extension_backend_op(
    Backend::TPU,
    "hinge_embedding_loss(Tensor, Tensor, double, int64_t) -> Tensor",
    &xla_hinge_embedding_loss);
register_extension_backend_op(
    Backend::TPU,
    "ger(Tensor, Tensor) -> Tensor",
    &xla_ger);
register_extension_backend_op(
    Backend::TPU,
    "ger_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_ger_out);
register_extension_backend_op(
    Backend::TPU,
    "gesv(Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_gesv);
register_extension_backend_op(
    Backend::TPU,
    "gesv_out(Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_gesv_out);
register_extension_backend_op(
    Backend::TPU,
    "_gesv_helper(Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla__gesv_helper);
register_extension_backend_op(
    Backend::TPU,
    "group_norm(Tensor, int64_t, Tensor, Tensor, double, bool) -> Tensor",
    &xla_group_norm);
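// Spectral ops. fft/ifft/rfft/irfft appear to funnel into the single
// _fft_with_size kernel, so that is the one a backend really has to
// implement.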
register_extension_backend_op(
    Backend::TPU,
    "fft(Tensor, int64_t, bool) -> Tensor",
    &xla_fft);
register_extension_backend_op(
    Backend::TPU,
    "ifft(Tensor, int64_t, bool) -> Tensor",
    &xla_ifft);
register_extension_backend_op(
    Backend::TPU,
    "rfft(Tensor, int64_t, bool, bool) -> Tensor",
    &xla_rfft);
register_extension_backend_op(
    Backend::TPU,
    "irfft(Tensor, int64_t, bool, bool, IntList) -> Tensor",
    &xla_irfft);
register_extension_backend_op(
    Backend::TPU,
    "_fft_with_size(Tensor, int64_t, bool, bool, bool, IntList, bool, bool, IntList) -> Tensor",
    &xla__fft_with_size);
register_extension_backend_op(
    Backend::TPU,
    "_cufft_set_plan_cache_max_size(int64_t) -> void",
    &xla__cufft_set_plan_cache_max_size);
register_extension_backend_op(
    Backend::TPU,
    "index(Tensor, TensorList) -> Tensor",
    &xla_index);
register_extension_backend_op(
    Backend::TPU,
    "index_copy_(Tensor, int64_t, Tensor, Tensor) -> Tensor",
    &xla_index_copy_);
register_extension_backend_op(
    Backend::TPU,
    "index_put(Tensor, TensorList, Tensor, bool) -> Tensor",
    &xla_index_put);
register_extension_backend_op(
    Backend::TPU,
    "index_put_(Tensor, TensorList, Tensor, bool) -> Tensor",
    &xla_index_put_);
register_extension_backend_op(
    Backend::TPU,
    "instance_norm(Tensor, Tensor, Tensor, Tensor, Tensor, bool, double, double, bool) -> Tensor",
    &xla_instance_norm);
register_extension_backend_op(
    Backend::TPU,
    "inverse(Tensor) -> Tensor",
    &xla_inverse);
register_extension_backend_op(
    Backend::TPU,
    "inverse_out(Tensor, Tensor) -> Tensor",
    &xla_inverse_out);
register_extension_backend_op(
    Backend::TPU,
    "_inverse_helper(Tensor) -> Tensor",
    &xla__inverse_helper);
register_extension_backend_op(
    Backend::TPU,
    "isclose(Tensor, Tensor, double, double, bool) -> Tensor",
    &xla_isclose);
register_extension_backend_op(
    Backend::TPU,
    "isnan(Tensor) -> Tensor",
    &xla_isnan);
register_extension_backend_op(
    Backend::TPU,
    "is_distributed(Tensor) -> bool",
    &xla_is_distributed);
register_extension_backend_op(
    Backend::TPU,
    "is_floating_point(Tensor) -> bool",
    &xla_is_floating_point);
register_extension_backend_op(
    Backend::TPU,
    "is_complex(Tensor) -> bool",
    &xla_is_complex);
register_extension_backend_op(
    Backend::TPU,
    "is_nonzero(Tensor) -> bool",
    &xla_is_nonzero);
register_extension_backend_op(
    Backend::TPU,
    "is_same_size(Tensor, Tensor) -> bool",
    &xla_is_same_size);
register_extension_backend_op(
    Backend::TPU,
    "is_signed(Tensor) -> bool",
    &xla_is_signed);
register_extension_backend_op(
    Backend::TPU,
    "kl_div(Tensor, Tensor, int64_t) -> Tensor",
    &xla_kl_div);
register_extension_backend_op(
    Backend::TPU,
    "kl_div_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
    &xla_kl_div_backward);
register_extension_backend_op(
    Backend::TPU,
    "kthvalue(Tensor, int64_t, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_kthvalue);
register_extension_backend_op(
    Backend::TPU,
    "kthvalue_out(Tensor, Tensor, Tensor, int64_t, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_kthvalue_out);
register_extension_backend_op(
    Backend::TPU,
    "layer_norm(Tensor, IntList, Tensor, Tensor, double, bool) -> Tensor",
    &xla_layer_norm);
register_extension_backend_op(
    Backend::TPU,
    "linear(Tensor, Tensor, Tensor) -> Tensor",
    &xla_linear);
register_extension_backend_op(
    Backend::TPU,
    "fbgemm_linear_int8_weight(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Tensor) -> Tensor",
    &xla_fbgemm_linear_int8_weight);
register_extension_backend_op(
    Backend::TPU,
    "fbgemm_linear_quantize_weight(Tensor) -> std::tuple<Tensor,Tensor,double,int64_t>",
    &xla_fbgemm_linear_quantize_weight);
register_extension_backend_op(
    Backend::TPU,
    "fbgemm_pack_quantized_matrix(Tensor, int64_t, int64_t) -> Tensor",
    &xla_fbgemm_pack_quantized_matrix);
register_extension_backend_op(
    Backend::TPU,
    "linspace_out(Tensor, Scalar, Scalar, int64_t) -> Tensor",
    &xla_linspace_out);
register_extension_backend_op(
    Backend::TPU,
    "log(Tensor) -> Tensor",
    &xla_log);
register_extension_backend_op(
    Backend::TPU,
    "log_(Tensor) -> Tensor",
    &xla_log_);
register_extension_backend_op(
    Backend::TPU,
    "log_out(Tensor, Tensor) -> Tensor",
    &xla_log_out);
register_extension_backend_op(
    Backend::TPU,
    "log10(Tensor) -> Tensor",
    &xla_log10);
register_extension_backend_op(
    Backend::TPU,
    "log10_(Tensor) -> Tensor",
    &xla_log10_);
register_extension_backend_op(
    Backend::TPU,
    "log10_out(Tensor, Tensor) -> Tensor",
    &xla_log10_out);
register_extension_backend_op(
    Backend::TPU,
    "log1p(Tensor) -> Tensor",
    &xla_log1p);
register_extension_backend_op(
    Backend::TPU,
    "log1p_(Tensor) -> Tensor",
    &xla_log1p_);
register_extension_backend_op(
    Backend::TPU,
    "log1p_out(Tensor, Tensor) -> Tensor",
    &xla_log1p_out);
register_extension_backend_op(
    Backend::TPU,
    "log2(Tensor) -> Tensor",
    &xla_log2);
register_extension_backend_op(
    Backend::TPU,
    "log2_(Tensor) -> Tensor",
    &xla_log2_);
register_extension_backend_op(
    Backend::TPU,
    "log2_out(Tensor, Tensor) -> Tensor",
    &xla_log2_out);
register_extension_backend_op(
    Backend::TPU,
    "logdet(Tensor) -> Tensor",
    &xla_logdet);
register_extension_backend_op(
    Backend::TPU,
    "logspace_out(Tensor, Scalar, Scalar, int64_t) -> Tensor",
    &xla_logspace_out);
register_extension_backend_op(
    Backend::TPU,
    "log_softmax(Tensor, int64_t, ScalarType) -> Tensor",
    &xla_log_softmax);
register_extension_backend_op(
    Backend::TPU,
    "log_softmax(Tensor, int64_t) -> Tensor",
    &xla_log_softmax_1);
register_extension_backend_op(
    Backend::TPU,
    "_log_softmax(Tensor, int64_t, bool) -> Tensor",
    &xla__log_softmax);
register_extension_backend_op(
    Backend::TPU,
    "_log_softmax_backward_data(Tensor, Tensor, int64_t, Tensor) -> Tensor",
    &xla__log_softmax_backward_data);
register_extension_backend_op(
    Backend::TPU,
    "logsumexp(Tensor, int64_t, bool) -> Tensor",
    &xla_logsumexp);
register_extension_backend_op(
    Backend::TPU,
    "logsumexp_out(Tensor, Tensor, int64_t, bool) -> Tensor",
    &xla_logsumexp_out);
register_extension_backend_op(
    Backend::TPU,
    "margin_ranking_loss(Tensor, Tensor, Tensor, double, int64_t) -> Tensor",
    &xla_margin_ranking_loss);
register_extension_backend_op(
    Backend::TPU,
    "matmul(Tensor, Tensor) -> Tensor",
    &xla_matmul);
register_extension_backend_op(
    Backend::TPU,
    "matmul_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_matmul_out);
register_extension_backend_op(
    Backend::TPU,
    "matrix_rank(Tensor, double, bool) -> Tensor",
    &xla_matrix_rank);
register_extension_backend_op(
    Backend::TPU,
    "matrix_rank(Tensor, bool) -> Tensor",
    &xla_matrix_rank_1);
register_extension_backend_op(
    Backend::TPU,
    "matrix_power(Tensor, int64_t) -> Tensor",
    &xla_matrix_power);
register_extension_backend_op(
    Backend::TPU,
    "max(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_max);
register_extension_backend_op(
    Backend::TPU,
    "max_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_max_out);
register_extension_backend_op(
    Backend::TPU,
    "max_values(Tensor, int64_t, bool) -> Tensor",
    &xla_max_values);
register_extension_backend_op(
    Backend::TPU,
    "max_pool1d_with_indices(Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
    &xla_max_pool1d_with_indices);
register_extension_backend_op(
    Backend::TPU,
    "max_pool1d(Tensor, IntList, IntList, IntList, IntList, bool) -> Tensor",
    &xla_max_pool1d);
register_extension_backend_op(
    Backend::TPU,
    "max_pool2d(Tensor, IntList, IntList, IntList, IntList, bool) -> Tensor",
    &xla_max_pool2d);
register_extension_backend_op(
    Backend::TPU,
    "max_pool3d(Tensor, IntList, IntList, IntList, IntList, bool) -> Tensor",
    &xla_max_pool3d);
register_extension_backend_op(
    Backend::TPU,
    "mean(Tensor, ScalarType) -> Tensor",
    &xla_mean);
register_extension_backend_op(
    Backend::TPU,
    "mean(Tensor) -> Tensor",
    &xla_mean_1);
register_extension_backend_op(
    Backend::TPU,
    "mean(Tensor, IntList, bool, ScalarType) -> Tensor",
    &xla_mean_2);
register_extension_backend_op(
    Backend::TPU,
    "mean(Tensor, IntList, bool) -> Tensor",
    &xla_mean_3);
register_extension_backend_op(
    Backend::TPU,
    "mean(Tensor, IntList, ScalarType) -> Tensor",
    &xla_mean_4);
register_extension_backend_op(
    Backend::TPU,
    "mean_out(Tensor, Tensor, IntList, bool, ScalarType) -> Tensor",
    &xla_mean_out);
register_extension_backend_op(
    Backend::TPU,
    "mean_out(Tensor, Tensor, IntList, bool) -> Tensor",
    &xla_mean_out_1);
register_extension_backend_op(
    Backend::TPU,
    "mean_out(Tensor, Tensor, IntList, ScalarType) -> Tensor",
    &xla_mean_out_2);
register_extension_backend_op(
    Backend::TPU,
    "median(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_median);
register_extension_backend_op(
    Backend::TPU,
    "median_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_median_out);
register_extension_backend_op(
    Backend::TPU,
    "min(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_min);
register_extension_backend_op(
    Backend::TPU,
    "min_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_min_out);
register_extension_backend_op(
    Backend::TPU,
    "min_values(Tensor, int64_t, bool) -> Tensor",
    &xla_min_values);
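// mkldnn_* and miopen_* are CPU/ROCm-specific kernels; presumably gen.py
// simply covers the whole op table, since a TPU build would not be
// expected to hit these.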
register_extension_backend_op( | |
Backend::TPU, | |
"mkldnn_convolution(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t) -> Tensor", | |
&xla_mkldnn_convolution); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mkldnn_convolution_backward_input(IntList, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool) -> Tensor", | |
&xla_mkldnn_convolution_backward_input); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mkldnn_convolution_backward_weights(IntList, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla_mkldnn_convolution_backward_weights); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mkldnn_convolution_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_mkldnn_convolution_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_batch_norm(Tensor, Tensor, Tensor, Tensor, Tensor, bool, double, double) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_miopen_batch_norm); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_batch_norm_backward(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, double) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_miopen_batch_norm_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_backward_input(IntList, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution_backward_input); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_miopen_convolution_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_backward_bias(Tensor) -> Tensor", | |
&xla_miopen_convolution_backward_bias); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_backward_weight(IntList, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution_backward_weight); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_transpose(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution_transpose); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_transpose_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, int64_t, bool, bool, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_miopen_convolution_transpose_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_transpose_backward_input(Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution_transpose_backward_input); | |
register_extension_backend_op( | |
Backend::TPU, | |
"miopen_convolution_transpose_backward_weight(IntList, Tensor, Tensor, IntList, IntList, IntList, int64_t, bool, bool) -> Tensor", | |
&xla_miopen_convolution_transpose_backward_weight); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mm(Tensor, Tensor) -> Tensor", | |
&xla_mm); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mm_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_mm_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_sparse_mm(Tensor, Tensor) -> Tensor", | |
&xla__sparse_mm); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mode(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla_mode); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mode_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>", | |
&xla_mode_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mul(Tensor, Tensor) -> Tensor", | |
&xla_mul); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mul_(Tensor, Tensor) -> Tensor", | |
&xla_mul_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mul_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_mul_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mul(Tensor, Scalar) -> Tensor", | |
&xla_mul_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mul_(Tensor, Scalar) -> Tensor", | |
&xla_mul__1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mv(Tensor, Tensor) -> Tensor", | |
&xla_mv); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mv_out(Tensor, Tensor, Tensor) -> Tensor", | |
&xla_mv_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mvlgamma(Tensor, int64_t) -> Tensor", | |
&xla_mvlgamma); | |
register_extension_backend_op( | |
Backend::TPU, | |
"mvlgamma_(Tensor, int64_t) -> Tensor", | |
&xla_mvlgamma_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"narrow_copy(Tensor, int64_t, int64_t, int64_t) -> Tensor", | |
&xla_narrow_copy); | |
register_extension_backend_op( | |
Backend::TPU, | |
"narrow(Tensor, int64_t, int64_t, int64_t) -> Tensor", | |
&xla_narrow); | |
register_extension_backend_op( | |
Backend::TPU, | |
"native_batch_norm(Tensor, Tensor, Tensor, Tensor, Tensor, bool, double, double) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_native_batch_norm); | |
register_extension_backend_op( | |
Backend::TPU, | |
"native_batch_norm_backward(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, bool, double, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>", | |
&xla_native_batch_norm_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"batch_norm_update_stats(Tensor, Tensor, Tensor, double) -> std::tuple<Tensor,Tensor>", | |
&xla_batch_norm_update_stats); | |
register_extension_backend_op( | |
Backend::TPU, | |
"ones_out(Tensor, IntList) -> Tensor", | |
&xla_ones_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"ones_like(Tensor) -> Tensor", | |
&xla_ones_like); | |
register_extension_backend_op( | |
Backend::TPU, | |
"pairwise_distance(Tensor, Tensor, double, double, bool) -> Tensor", | |
&xla_pairwise_distance); | |
register_extension_backend_op( | |
Backend::TPU, | |
"pdist(Tensor, double) -> Tensor", | |
&xla_pdist); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_pdist_forward(Tensor, double) -> Tensor", | |
&xla__pdist_forward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"_pdist_backward(Tensor, Tensor, double, Tensor) -> Tensor", | |
&xla__pdist_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"cosine_similarity(Tensor, Tensor, int64_t, double) -> Tensor", | |
&xla_cosine_similarity); | |
register_extension_backend_op( | |
Backend::TPU, | |
"permute(Tensor, IntList) -> Tensor", | |
&xla_permute); | |
register_extension_backend_op( | |
Backend::TPU, | |
"pixel_shuffle(Tensor, int64_t) -> Tensor", | |
&xla_pixel_shuffle); | |
register_extension_backend_op( | |
Backend::TPU, | |
"pin_memory(Tensor) -> Tensor", | |
&xla_pin_memory); | |
register_extension_backend_op( | |
Backend::TPU, | |
"pinverse(Tensor, double) -> Tensor", | |
&xla_pinverse); | |
register_extension_backend_op( | |
Backend::TPU, | |
"rand_out(Tensor, IntList) -> Tensor", | |
&xla_rand_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"rand_out(Tensor, IntList, Generator) -> Tensor", | |
&xla_rand_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"rand_like(Tensor) -> Tensor", | |
&xla_rand_like); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_out(Tensor, int64_t, IntList) -> Tensor", | |
&xla_randint_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_out(Tensor, int64_t, IntList, Generator) -> Tensor", | |
&xla_randint_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_out(Tensor, int64_t, int64_t, IntList) -> Tensor", | |
&xla_randint_out_2); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_out(Tensor, int64_t, int64_t, IntList, Generator) -> Tensor", | |
&xla_randint_out_3); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_like(Tensor, int64_t) -> Tensor", | |
&xla_randint_like); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randint_like(Tensor, int64_t, int64_t) -> Tensor", | |
&xla_randint_like_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randn_out(Tensor, IntList) -> Tensor", | |
&xla_randn_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randn_out(Tensor, IntList, Generator) -> Tensor", | |
&xla_randn_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randn_like(Tensor) -> Tensor", | |
&xla_randn_like); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randperm_out(Tensor, int64_t) -> Tensor", | |
&xla_randperm_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"randperm_out(Tensor, int64_t, Generator) -> Tensor", | |
&xla_randperm_out_1); | |
register_extension_backend_op( | |
Backend::TPU, | |
"range_out(Tensor, Scalar, Scalar, Scalar) -> Tensor", | |
&xla_range_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"repeat(Tensor, IntList) -> Tensor", | |
&xla_repeat); | |
register_extension_backend_op( | |
Backend::TPU, | |
"reshape(Tensor, IntList) -> Tensor", | |
&xla_reshape); | |
register_extension_backend_op( | |
Backend::TPU, | |
"reshape_as(Tensor, Tensor) -> Tensor", | |
&xla_reshape_as); | |
register_extension_backend_op( | |
Backend::TPU, | |
"RoiPooling2d_forward(Tensor, Tensor, int64_t, int64_t, double) -> std::tuple<Tensor,Tensor>", | |
&xla_RoiPooling2d_forward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"RoiPooling2d_backward(Tensor, Tensor, int64_t, int64_t, double, Tensor, Tensor) -> Tensor", | |
&xla_RoiPooling2d_backward); | |
register_extension_backend_op( | |
Backend::TPU, | |
"round(Tensor) -> Tensor", | |
&xla_round); | |
register_extension_backend_op( | |
Backend::TPU, | |
"round_(Tensor) -> Tensor", | |
&xla_round_); | |
register_extension_backend_op( | |
Backend::TPU, | |
"round_out(Tensor, Tensor) -> Tensor", | |
&xla_round_out); | |
register_extension_backend_op( | |
Backend::TPU, | |
"rrelu(Tensor, Scalar, Scalar, bool, Generator) -> Tensor", | |
&xla_rrelu); | |
register_extension_backend_op( | |
Backend::TPU, | |
"rrelu_(Tensor, Scalar, Scalar, bool, Generator) -> Tensor", | |
&xla_rrelu_); | |
register_extension_backend_op(
    Backend::TPU,
    "relu(Tensor) -> Tensor",
    &xla_relu);
register_extension_backend_op(
    Backend::TPU,
    "relu_(Tensor) -> Tensor",
    &xla_relu_);
register_extension_backend_op(
    Backend::TPU,
    "prelu(Tensor, Tensor) -> Tensor",
    &xla_prelu);
register_extension_backend_op(
    Backend::TPU,
    "prelu_backward(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_prelu_backward);
register_extension_backend_op(
    Backend::TPU,
    "hardshrink(Tensor, Scalar) -> Tensor",
    &xla_hardshrink);
register_extension_backend_op(
    Backend::TPU,
    "hardshrink_backward(Tensor, Tensor, Scalar) -> Tensor",
    &xla_hardshrink_backward);
register_extension_backend_op(
    Backend::TPU,
    "rsqrt(Tensor) -> Tensor",
    &xla_rsqrt);
register_extension_backend_op(
    Backend::TPU,
    "rsqrt_(Tensor) -> Tensor",
    &xla_rsqrt_);
register_extension_backend_op(
    Backend::TPU,
    "rsqrt_out(Tensor, Tensor) -> Tensor",
    &xla_rsqrt_out);
register_extension_backend_op(
    Backend::TPU,
    "select(Tensor, int64_t, int64_t) -> Tensor",
    &xla_select);
register_extension_backend_op(
    Backend::TPU,
    "selu(Tensor) -> Tensor",
    &xla_selu);
register_extension_backend_op(
    Backend::TPU,
    "selu_(Tensor) -> Tensor",
    &xla_selu_);
register_extension_backend_op(
    Backend::TPU,
    "celu(Tensor, Scalar) -> Tensor",
    &xla_celu);
register_extension_backend_op(
    Backend::TPU,
    "celu_(Tensor, Scalar) -> Tensor",
    &xla_celu_);
register_extension_backend_op(
    Backend::TPU,
    "sigmoid(Tensor) -> Tensor",
    &xla_sigmoid);
register_extension_backend_op(
    Backend::TPU,
    "sigmoid_(Tensor) -> Tensor",
    &xla_sigmoid_);
register_extension_backend_op(
    Backend::TPU,
    "sigmoid_out(Tensor, Tensor) -> Tensor",
    &xla_sigmoid_out);
register_extension_backend_op(
    Backend::TPU,
    "sin(Tensor) -> Tensor",
    &xla_sin);
register_extension_backend_op(
    Backend::TPU,
    "sin_(Tensor) -> Tensor",
    &xla_sin_);
register_extension_backend_op(
    Backend::TPU,
    "sin_out(Tensor, Tensor) -> Tensor",
    &xla_sin_out);
register_extension_backend_op(
    Backend::TPU,
    "sinh(Tensor) -> Tensor",
    &xla_sinh);
register_extension_backend_op(
    Backend::TPU,
    "sinh_(Tensor) -> Tensor",
    &xla_sinh_);
register_extension_backend_op(
    Backend::TPU,
    "sinh_out(Tensor, Tensor) -> Tensor",
    &xla_sinh_out);
register_extension_backend_op(
    Backend::TPU,
    "detach(Tensor) -> Tensor",
    &xla_detach);
register_extension_backend_op(
    Backend::TPU,
    "detach_(Tensor) -> Tensor",
    &xla_detach_);
register_extension_backend_op(
    Backend::TPU,
    "size(Tensor, int64_t) -> int64_t",
    &xla_size);
register_extension_backend_op(
    Backend::TPU,
    "slice(Tensor, int64_t, int64_t, int64_t, int64_t) -> Tensor",
    &xla_slice);
register_extension_backend_op(
    Backend::TPU,
    "slogdet(Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_slogdet);
register_extension_backend_op(
    Backend::TPU,
    "smm(Tensor, Tensor) -> Tensor",
    &xla_smm);
register_extension_backend_op(
    Backend::TPU,
    "softmax(Tensor, int64_t, ScalarType) -> Tensor",
    &xla_softmax);
register_extension_backend_op(
    Backend::TPU,
    "softmax(Tensor, int64_t) -> Tensor",
    &xla_softmax_1);
register_extension_backend_op(
    Backend::TPU,
    "_softmax(Tensor, int64_t, bool) -> Tensor",
    &xla__softmax);
register_extension_backend_op(
    Backend::TPU,
    "_softmax_backward_data(Tensor, Tensor, int64_t, Tensor) -> Tensor",
    &xla__softmax_backward_data);
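// Sparse arithmetic kernels (_sparse_*_out) and general shape/reduction ops
// follow; the schema strings mirror the native ATen declarations of the time.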
register_extension_backend_op(
    Backend::TPU,
    "_sparse_add_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla__sparse_add_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_dense_add_out(Tensor, Tensor, SparseTensorRef, Scalar) -> Tensor",
    &xla__sparse_dense_add_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_div_zerodim_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla__sparse_div_zerodim_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_div_scalar_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla__sparse_div_scalar_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_mul_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla__sparse_mul_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_mul_zerodim_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla__sparse_mul_zerodim_out);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_mul_scalar_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla__sparse_mul_scalar_out);
register_extension_backend_op(
    Backend::TPU,
    "split(Tensor, int64_t, int64_t) -> std::vector<Tensor>",
    &xla_split);
register_extension_backend_op(
    Backend::TPU,
    "split_with_sizes(Tensor, IntList, int64_t) -> std::vector<Tensor>",
    &xla_split_with_sizes);
register_extension_backend_op(
    Backend::TPU,
    "squeeze(Tensor) -> Tensor",
    &xla_squeeze);
register_extension_backend_op(
    Backend::TPU,
    "squeeze(Tensor, int64_t) -> Tensor",
    &xla_squeeze_1);
register_extension_backend_op(
    Backend::TPU,
    "squeeze_(Tensor) -> Tensor",
    &xla_squeeze_);
register_extension_backend_op(
    Backend::TPU,
    "squeeze_(Tensor, int64_t) -> Tensor",
    &xla_squeeze__1);
register_extension_backend_op(
    Backend::TPU,
    "sspaddmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_sspaddmm);
register_extension_backend_op(
    Backend::TPU,
    "sspaddmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_sspaddmm_out);
register_extension_backend_op(
    Backend::TPU,
    "stack(TensorList, int64_t) -> Tensor",
    &xla_stack);
register_extension_backend_op(
    Backend::TPU,
    "stack_out(Tensor, TensorList, int64_t) -> Tensor",
    &xla_stack_out);
register_extension_backend_op(
    Backend::TPU,
    "stft(Tensor, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, Tensor, bool, bool) -> Tensor",
    &xla_stft);
register_extension_backend_op(
    Backend::TPU,
    "stride(Tensor, int64_t) -> int64_t",
    &xla_stride);
register_extension_backend_op(
    Backend::TPU,
    "sum(Tensor, ScalarType) -> Tensor",
    &xla_sum);
register_extension_backend_op(
    Backend::TPU,
    "sum(Tensor) -> Tensor",
    &xla_sum_1);
register_extension_backend_op(
    Backend::TPU,
    "sum(Tensor, IntList, bool, ScalarType) -> Tensor",
    &xla_sum_2);
register_extension_backend_op(
    Backend::TPU,
    "sum(Tensor, IntList, bool) -> Tensor",
    &xla_sum_3);
register_extension_backend_op(
    Backend::TPU,
    "sum(Tensor, IntList, ScalarType) -> Tensor",
    &xla_sum_4);
register_extension_backend_op(
    Backend::TPU,
    "sum_out(Tensor, Tensor, IntList, bool, ScalarType) -> Tensor",
    &xla_sum_out);
register_extension_backend_op(
    Backend::TPU,
    "sum_out(Tensor, Tensor, IntList, bool) -> Tensor",
    &xla_sum_out_1);
register_extension_backend_op(
    Backend::TPU,
    "sum_out(Tensor, Tensor, IntList, ScalarType) -> Tensor",
    &xla_sum_out_2);
register_extension_backend_op(
    Backend::TPU,
    "sum_to_size(Tensor, IntList) -> Tensor",
    &xla_sum_to_size);
register_extension_backend_op(
    Backend::TPU,
    "sqrt(Tensor) -> Tensor",
    &xla_sqrt);
register_extension_backend_op(
    Backend::TPU,
    "sqrt_(Tensor) -> Tensor",
    &xla_sqrt_);
register_extension_backend_op(
    Backend::TPU,
    "sqrt_out(Tensor, Tensor) -> Tensor",
    &xla_sqrt_out);
register_extension_backend_op(
    Backend::TPU,
    "std(Tensor, bool) -> Tensor",
    &xla_std);
register_extension_backend_op(
    Backend::TPU,
    "std(Tensor, IntList, bool, bool) -> Tensor",
    &xla_std_1);
register_extension_backend_op(
    Backend::TPU,
    "std_out(Tensor, Tensor, IntList, bool, bool) -> Tensor",
    &xla_std_out);
register_extension_backend_op(
    Backend::TPU,
    "prod(Tensor, ScalarType) -> Tensor",
    &xla_prod);
register_extension_backend_op(
    Backend::TPU,
    "prod(Tensor) -> Tensor",
    &xla_prod_1);
register_extension_backend_op(
    Backend::TPU,
    "prod(Tensor, int64_t, bool, ScalarType) -> Tensor",
    &xla_prod_2);
register_extension_backend_op(
    Backend::TPU,
    "prod(Tensor, int64_t, bool) -> Tensor",
    &xla_prod_3);
register_extension_backend_op(
    Backend::TPU,
    "prod(Tensor, int64_t, ScalarType) -> Tensor",
    &xla_prod_4);
register_extension_backend_op(
    Backend::TPU,
    "prod_out(Tensor, Tensor, int64_t, bool, ScalarType) -> Tensor",
    &xla_prod_out);
register_extension_backend_op(
    Backend::TPU,
    "prod_out(Tensor, Tensor, int64_t, bool) -> Tensor",
    &xla_prod_out_1);
register_extension_backend_op(
    Backend::TPU,
    "prod_out(Tensor, Tensor, int64_t, ScalarType) -> Tensor",
    &xla_prod_out_2);
register_extension_backend_op(
    Backend::TPU,
    "t(Tensor) -> Tensor",
    &xla_t);
register_extension_backend_op(
    Backend::TPU,
    "t_(Tensor) -> Tensor",
    &xla_t_);
register_extension_backend_op(
    Backend::TPU,
    "tan(Tensor) -> Tensor",
    &xla_tan);
register_extension_backend_op(
    Backend::TPU,
    "tan_(Tensor) -> Tensor",
    &xla_tan_);
register_extension_backend_op(
    Backend::TPU,
    "tan_out(Tensor, Tensor) -> Tensor",
    &xla_tan_out);
register_extension_backend_op(
    Backend::TPU,
    "tanh(Tensor) -> Tensor",
    &xla_tanh);
register_extension_backend_op(
    Backend::TPU,
    "tanh_(Tensor) -> Tensor",
    &xla_tanh_);
register_extension_backend_op(
    Backend::TPU,
    "tanh_out(Tensor, Tensor) -> Tensor",
    &xla_tanh_out);
register_extension_backend_op(
    Backend::TPU,
    "tensordot(Tensor, Tensor, IntList, IntList) -> Tensor",
    &xla_tensordot);
register_extension_backend_op(
    Backend::TPU,
    "threshold(Tensor, Scalar, Scalar) -> Tensor",
    &xla_threshold);
register_extension_backend_op(
    Backend::TPU,
    "threshold_(Tensor, Scalar, Scalar) -> Tensor",
    &xla_threshold_);
register_extension_backend_op(
    Backend::TPU,
    "threshold_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_threshold_out);
register_extension_backend_op(
    Backend::TPU,
    "threshold_backward(Tensor, Tensor, Scalar) -> Tensor",
    &xla_threshold_backward);
register_extension_backend_op(
    Backend::TPU,
    "transpose(Tensor, int64_t, int64_t) -> Tensor",
    &xla_transpose);
register_extension_backend_op(
    Backend::TPU,
    "transpose_(Tensor, int64_t, int64_t) -> Tensor",
    &xla_transpose_);
register_extension_backend_op(
    Backend::TPU,
    "one_hot(Tensor, int64_t) -> Tensor",
    &xla_one_hot);
register_extension_backend_op(
    Backend::TPU,
    "flip(Tensor, IntList) -> Tensor",
    &xla_flip);
register_extension_backend_op(
    Backend::TPU,
    "roll(Tensor, IntList, IntList) -> Tensor",
    &xla_roll);
register_extension_backend_op(
    Backend::TPU,
    "rot90(Tensor, int64_t, IntList) -> Tensor",
    &xla_rot90);
register_extension_backend_op(
    Backend::TPU,
    "_trilinear(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, int64_t) -> Tensor",
    &xla__trilinear);
register_extension_backend_op(
    Backend::TPU,
    "triplet_margin_loss(Tensor, Tensor, Tensor, double, double, double, bool, int64_t) -> Tensor",
    &xla_triplet_margin_loss);
register_extension_backend_op(
    Backend::TPU,
    "trunc(Tensor) -> Tensor",
    &xla_trunc);
register_extension_backend_op(
    Backend::TPU,
    "trunc_(Tensor) -> Tensor",
    &xla_trunc_);
register_extension_backend_op(
    Backend::TPU,
    "trunc_out(Tensor, Tensor) -> Tensor",
    &xla_trunc_out);
register_extension_backend_op(
    Backend::TPU,
    "type_as(Tensor, Tensor) -> Tensor",
    &xla_type_as);
register_extension_backend_op(
    Backend::TPU,
    "_unique(Tensor, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla__unique);
register_extension_backend_op(
    Backend::TPU,
    "_unique_dim(Tensor, int64_t, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla__unique_dim);
register_extension_backend_op(
    Backend::TPU,
    "_unsafe_view(Tensor, IntList) -> Tensor",
    &xla__unsafe_view);
register_extension_backend_op(
    Backend::TPU,
    "unsqueeze(Tensor, int64_t) -> Tensor",
    &xla_unsqueeze);
register_extension_backend_op(
    Backend::TPU,
    "unsqueeze_(Tensor, int64_t) -> Tensor",
    &xla_unsqueeze_);
register_extension_backend_op(
    Backend::TPU,
    "var(Tensor, bool) -> Tensor",
    &xla_var);
register_extension_backend_op(
    Backend::TPU,
    "var(Tensor, IntList, bool, bool) -> Tensor",
    &xla_var_1);
register_extension_backend_op(
    Backend::TPU,
    "var_out(Tensor, Tensor, IntList, bool, bool) -> Tensor",
    &xla_var_out);
register_extension_backend_op(
    Backend::TPU,
    "view_as(Tensor, Tensor) -> Tensor",
    &xla_view_as);
register_extension_backend_op(
    Backend::TPU,
    "where(Tensor, Tensor, Tensor) -> Tensor",
    &xla_where);
register_extension_backend_op(
    Backend::TPU,
    "_s_where(Tensor, Tensor, Tensor) -> Tensor",
    &xla__s_where);
register_extension_backend_op(
    Backend::TPU,
    "norm_except_dim(Tensor, int64_t, int64_t) -> Tensor",
    &xla_norm_except_dim);
register_extension_backend_op(
    Backend::TPU,
    "_weight_norm(Tensor, Tensor, int64_t) -> Tensor",
    &xla__weight_norm);
register_extension_backend_op(
    Backend::TPU,
    "_weight_norm_cuda_interface(Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla__weight_norm_cuda_interface);
register_extension_backend_op(
    Backend::TPU,
    "_weight_norm_cuda_interface_backward(Tensor, Tensor, Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla__weight_norm_cuda_interface_backward);
register_extension_backend_op(
    Backend::TPU,
    "_weight_norm_differentiable_backward(Tensor, Tensor, Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla__weight_norm_differentiable_backward);
register_extension_backend_op(
    Backend::TPU,
    "zeros_out(Tensor, IntList) -> Tensor",
    &xla_zeros_out);
register_extension_backend_op(
    Backend::TPU,
    "zeros_like(Tensor) -> Tensor",
    &xla_zeros_like);
register_extension_backend_op(
    Backend::TPU,
    "_standard_gamma_grad(Tensor, Tensor) -> Tensor",
    &xla__standard_gamma_grad);
register_extension_backend_op(
    Backend::TPU,
    "_standard_gamma(Tensor, Generator) -> Tensor",
    &xla__standard_gamma);
register_extension_backend_op(
    Backend::TPU,
    "poisson(Tensor, Generator) -> Tensor",
    &xla_poisson);
register_extension_backend_op(
    Backend::TPU,
    "native_norm(Tensor, Scalar) -> Tensor",
    &xla_native_norm);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_sum(Tensor) -> Tensor",
    &xla__sparse_sum);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_sum(Tensor, ScalarType) -> Tensor",
    &xla__sparse_sum_1);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_sum(Tensor, IntList) -> Tensor",
    &xla__sparse_sum_2);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_sum(Tensor, IntList, ScalarType) -> Tensor",
    &xla__sparse_sum_3);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_sum_backward(Tensor, Tensor, IntList) -> Tensor",
    &xla__sparse_sum_backward);
register_extension_backend_op(
    Backend::TPU,
    "norm(Tensor, c10::optional<Scalar>, ScalarType) -> Tensor",
    &xla_norm);
register_extension_backend_op(
    Backend::TPU,
    "norm(Tensor, Scalar) -> Tensor",
    &xla_norm_1);
register_extension_backend_op(
    Backend::TPU,
    "norm(Tensor, c10::optional<Scalar>, IntList, bool, ScalarType) -> Tensor",
    &xla_norm_2);
register_extension_backend_op(
    Backend::TPU,
    "norm(Tensor, c10::optional<Scalar>, IntList, bool) -> Tensor",
    &xla_norm_3);
register_extension_backend_op(
    Backend::TPU,
    "norm_out(Tensor, Tensor, c10::optional<Scalar>, IntList, bool, ScalarType) -> Tensor",
    &xla_norm_out);
register_extension_backend_op(
    Backend::TPU,
    "norm_out(Tensor, Tensor, c10::optional<Scalar>, IntList, bool) -> Tensor",
    &xla_norm_out_1);
register_extension_backend_op(
    Backend::TPU,
    "frobenius_norm(Tensor) -> Tensor",
    &xla_frobenius_norm);
register_extension_backend_op(
    Backend::TPU,
    "frobenius_norm(Tensor, IntList, bool) -> Tensor",
    &xla_frobenius_norm_1);
register_extension_backend_op(
    Backend::TPU,
    "frobenius_norm_out(Tensor, Tensor, IntList, bool) -> Tensor",
    &xla_frobenius_norm_out);
register_extension_backend_op(
    Backend::TPU,
    "nuclear_norm(Tensor, bool) -> Tensor",
    &xla_nuclear_norm);
register_extension_backend_op(
    Backend::TPU,
    "nuclear_norm_out(Tensor, Tensor, bool) -> Tensor",
    &xla_nuclear_norm_out);
register_extension_backend_op(
    Backend::TPU,
    "native_clone(Tensor) -> Tensor",
    &xla_native_clone);
register_extension_backend_op(
    Backend::TPU,
    "clone(Tensor) -> Tensor",
    &xla_clone);
register_extension_backend_op(
    Backend::TPU,
    "native_resize_as_(Tensor, Tensor) -> Tensor",
    &xla_native_resize_as_);
register_extension_backend_op(
    Backend::TPU,
    "resize_as_(Tensor, Tensor) -> Tensor",
    &xla_resize_as_);
register_extension_backend_op(
    Backend::TPU,
    "native_pow_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_native_pow_out);
register_extension_backend_op(
    Backend::TPU,
    "native_pow(Tensor, Scalar) -> Tensor",
    &xla_native_pow);
register_extension_backend_op(
    Backend::TPU,
    "pow_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_pow_out);
register_extension_backend_op(
    Backend::TPU,
    "pow(Tensor, Scalar) -> Tensor",
    &xla_pow);
register_extension_backend_op(
    Backend::TPU,
    "native_zero_(Tensor) -> Tensor",
    &xla_native_zero_);
register_extension_backend_op(
    Backend::TPU,
    "zero_(Tensor) -> Tensor",
    &xla_zero_);
register_extension_backend_op(
    Backend::TPU,
    "sub_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_sub_out);
register_extension_backend_op(
    Backend::TPU,
    "sub(Tensor, Tensor, Scalar) -> Tensor",
    &xla_sub);
register_extension_backend_op(
    Backend::TPU,
    "sub_(Tensor, Tensor, Scalar) -> Tensor",
    &xla_sub_);
register_extension_backend_op(
    Backend::TPU,
    "sub(Tensor, Scalar, Scalar) -> Tensor",
    &xla_sub_1);
register_extension_backend_op(
    Backend::TPU,
    "sub_(Tensor, Scalar, Scalar) -> Tensor",
    &xla_sub__1);
register_extension_backend_op(
    Backend::TPU,
    "rsub(Tensor, Tensor, Scalar) -> Tensor",
    &xla_rsub);
register_extension_backend_op(
    Backend::TPU,
    "rsub(Tensor, Scalar, Scalar) -> Tensor",
    &xla_rsub_1);
register_extension_backend_op(
    Backend::TPU,
    "s_native_addmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_s_native_addmm_out);
register_extension_backend_op(
    Backend::TPU,
    "s_native_addmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_s_native_addmm);
register_extension_backend_op(
    Backend::TPU,
    "s_native_addmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_s_native_addmm_);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_addmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla__sparse_addmm);
register_extension_backend_op(
    Backend::TPU,
    "addmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmm_out);
register_extension_backend_op(
    Backend::TPU,
    "addmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmm);
register_extension_backend_op(
    Backend::TPU,
    "addmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addmm_);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_coo_tensor_with_dims(int64_t, int64_t, IntList, TensorOptions) -> Tensor",
    &xla__sparse_coo_tensor_with_dims);
register_extension_backend_op(
    Backend::TPU,
    "_sparse_coo_tensor_with_dims_and_tensors(int64_t, int64_t, IntList, Tensor, Tensor, TensorOptions) -> Tensor",
    &xla__sparse_coo_tensor_with_dims_and_tensors);
register_extension_backend_op(
    Backend::TPU,
    "sparse_resize_(Tensor, IntList, int64_t, int64_t) -> Tensor",
    &xla_sparse_resize_);
register_extension_backend_op(
    Backend::TPU,
    "sparse_resize_and_clear_(Tensor, IntList, int64_t, int64_t) -> Tensor",
    &xla_sparse_resize_and_clear_);
register_extension_backend_op(
    Backend::TPU,
    "sparse_mask(Tensor, SparseTensorRef) -> Tensor",
    &xla_sparse_mask);
register_extension_backend_op(
    Backend::TPU,
    "to_dense(Tensor) -> Tensor",
    &xla_to_dense);
register_extension_backend_op(
    Backend::TPU,
    "sparse_dim(Tensor) -> int64_t",
    &xla_sparse_dim);
register_extension_backend_op(
    Backend::TPU,
    "_dimI(Tensor) -> int64_t",
    &xla__dimI);
register_extension_backend_op(
    Backend::TPU,
    "dense_dim(Tensor) -> int64_t",
    &xla_dense_dim);
register_extension_backend_op(
    Backend::TPU,
    "_dimV(Tensor) -> int64_t",
    &xla__dimV);
register_extension_backend_op(
    Backend::TPU,
    "_nnz(Tensor) -> int64_t",
    &xla__nnz);
register_extension_backend_op(
    Backend::TPU,
    "coalesce(Tensor) -> Tensor",
    &xla_coalesce);
register_extension_backend_op(
    Backend::TPU,
    "is_coalesced(Tensor) -> bool",
    &xla_is_coalesced);
register_extension_backend_op(
    Backend::TPU,
    "_indices(Tensor) -> Tensor",
    &xla__indices);
register_extension_backend_op(
    Backend::TPU,
    "_values(Tensor) -> Tensor",
    &xla__values);
register_extension_backend_op(
    Backend::TPU,
    "_coalesced_(Tensor, bool) -> Tensor",
    &xla__coalesced_);
register_extension_backend_op(
    Backend::TPU,
    "indices(Tensor) -> Tensor",
    &xla_indices);
register_extension_backend_op(
    Backend::TPU,
    "values(Tensor) -> Tensor",
    &xla_values);
register_extension_backend_op(
    Backend::TPU,
    "hspmm_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_hspmm_out);
register_extension_backend_op(
    Backend::TPU,
    "hspmm(Tensor, Tensor) -> Tensor",
    &xla_hspmm);
register_extension_backend_op(
    Backend::TPU,
    "copy_sparse_to_sparse_(Tensor, Tensor, bool) -> Tensor",
    &xla_copy_sparse_to_sparse_);
register_extension_backend_op(
    Backend::TPU,
    "numel(Tensor) -> int64_t",
    &xla_numel);
register_extension_backend_op(
    Backend::TPU,
    "unbind(Tensor, int64_t) -> std::vector<Tensor>",
    &xla_unbind);
register_extension_backend_op(
    Backend::TPU,
    "to_sparse(Tensor, int64_t) -> Tensor",
    &xla_to_sparse);
register_extension_backend_op(
    Backend::TPU,
    "to_sparse(Tensor) -> Tensor",
    &xla_to_sparse_1);
register_extension_backend_op(
    Backend::TPU,
    "to(Tensor, TensorOptions, bool, bool) -> Tensor",
    &xla_to);
register_extension_backend_op(
    Backend::TPU,
    "to(Tensor, Device, ScalarType, bool, bool) -> Tensor",
    &xla_to_1);
register_extension_backend_op(
    Backend::TPU,
    "to(Tensor, ScalarType, bool, bool) -> Tensor",
    &xla_to_2);
register_extension_backend_op(
    Backend::TPU,
    "to(Tensor, Tensor, bool, bool) -> Tensor",
    &xla_to_3);
register_extension_backend_op(
    Backend::TPU,
    "meshgrid(TensorList) -> std::vector<Tensor>",
    &xla_meshgrid);
register_extension_backend_op(
    Backend::TPU,
    "cartesian_prod(TensorList) -> Tensor",
    &xla_cartesian_prod);
register_extension_backend_op(
    Backend::TPU,
    "combinations(Tensor, int64_t, bool) -> Tensor",
    &xla_combinations);
register_extension_backend_op(
    Backend::TPU,
    "item(Tensor) -> Scalar",
    &xla_item);
register_extension_backend_op(
    Backend::TPU,
    "_local_scalar_dense(Tensor) -> Scalar",
    &xla__local_scalar_dense);
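// Recurrent-network ops: fused LSTM/GRU cells, the full lstm/gru/rnn_*
// entry points, their quantized cell variants, and packed-sequence helpers.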
register_extension_backend_op(
    Backend::TPU,
    "_thnn_fused_lstm_cell(Tensor, Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla__thnn_fused_lstm_cell);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_fused_lstm_cell_backward(Tensor, Tensor, Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>",
    &xla__thnn_fused_lstm_cell_backward);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_fused_gru_cell(Tensor, Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla__thnn_fused_gru_cell);
register_extension_backend_op(
    Backend::TPU,
    "_thnn_fused_gru_cell_backward(Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>",
    &xla__thnn_fused_gru_cell_backward);
register_extension_backend_op(
    Backend::TPU,
    "lstm(Tensor, TensorList, TensorList, bool, int64_t, double, bool, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_lstm);
register_extension_backend_op(
    Backend::TPU,
    "lstm(Tensor, Tensor, TensorList, TensorList, bool, int64_t, double, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_lstm_1);
register_extension_backend_op(
    Backend::TPU,
    "gru(Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_gru);
register_extension_backend_op(
    Backend::TPU,
    "gru(Tensor, Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_gru_1);
register_extension_backend_op(
    Backend::TPU,
    "rnn_tanh(Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_rnn_tanh);
register_extension_backend_op(
    Backend::TPU,
    "rnn_tanh(Tensor, Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_rnn_tanh_1);
register_extension_backend_op(
    Backend::TPU,
    "rnn_relu(Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_rnn_relu);
register_extension_backend_op(
    Backend::TPU,
    "rnn_relu(Tensor, Tensor, Tensor, TensorList, bool, int64_t, double, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_rnn_relu_1);
register_extension_backend_op(
    Backend::TPU,
    "lstm_cell(Tensor, TensorList, Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_lstm_cell);
register_extension_backend_op(
    Backend::TPU,
    "gru_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla_gru_cell);
register_extension_backend_op(
    Backend::TPU,
    "rnn_tanh_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla_rnn_tanh_cell);
register_extension_backend_op(
    Backend::TPU,
    "rnn_relu_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla_rnn_relu_cell);
register_extension_backend_op(
    Backend::TPU,
    "quantized_lstm(Tensor, TensorList, TensorList, bool, int64_t, double, bool, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_quantized_lstm);
register_extension_backend_op(
    Backend::TPU,
    "quantized_lstm_cell(Tensor, TensorList, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Scalar, Scalar) -> std::tuple<Tensor,Tensor>",
    &xla_quantized_lstm_cell);
register_extension_backend_op(
    Backend::TPU,
    "quantized_gru_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Scalar, Scalar) -> Tensor",
    &xla_quantized_gru_cell);
register_extension_backend_op(
    Backend::TPU,
    "quantized_rnn_relu_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Scalar, Scalar) -> Tensor",
    &xla_quantized_rnn_relu_cell);
register_extension_backend_op(
    Backend::TPU,
    "quantized_rnn_tanh_cell(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Scalar, Scalar) -> Tensor",
    &xla_quantized_rnn_tanh_cell);
register_extension_backend_op(
    Backend::TPU,
    "_pack_padded_sequence(Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor>",
    &xla__pack_padded_sequence);
register_extension_backend_op(
    Backend::TPU,
    "_pack_padded_sequence_backward(Tensor, IntList, Tensor, bool) -> Tensor",
    &xla__pack_padded_sequence_backward);
register_extension_backend_op(
    Backend::TPU,
    "_pad_packed_sequence(Tensor, Tensor, bool, Scalar, int64_t) -> std::tuple<Tensor,Tensor>",
    &xla__pad_packed_sequence);
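// Legacy TH-style tensor methods: set_/masked_fill_ variants, in-place
// comparison and bitwise operators, RNG fillers, and indexing primitives.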
register_extension_backend_op(
    Backend::TPU,
    "data_ptr(Tensor) -> void",
    &xla_data_ptr);
register_extension_backend_op(
    Backend::TPU,
    "set_(Tensor, Storage) -> Tensor",
    &xla_set_);
register_extension_backend_op(
    Backend::TPU,
    "set_(Tensor, Storage, int64_t, IntList, IntList) -> Tensor",
    &xla_set__1);
register_extension_backend_op(
    Backend::TPU,
    "set_(Tensor, Tensor) -> Tensor",
    &xla_set__2);
register_extension_backend_op(
    Backend::TPU,
    "set_(Tensor) -> Tensor",
    &xla_set__3);
register_extension_backend_op(
    Backend::TPU,
    "is_set_to(Tensor, Tensor) -> bool",
    &xla_is_set_to);
register_extension_backend_op(
    Backend::TPU,
    "masked_fill_(Tensor, Tensor, Scalar) -> Tensor",
    &xla_masked_fill_);
register_extension_backend_op(
    Backend::TPU,
    "masked_fill_(Tensor, Tensor, Tensor) -> Tensor",
    &xla_masked_fill__1);
register_extension_backend_op(
    Backend::TPU,
    "masked_scatter_(Tensor, Tensor, Tensor) -> Tensor",
    &xla_masked_scatter_);
register_extension_backend_op(
    Backend::TPU,
    "view(Tensor, IntList) -> Tensor",
    &xla_view);
register_extension_backend_op(
    Backend::TPU,
    "put_(Tensor, Tensor, Tensor, bool) -> Tensor",
    &xla_put_);
register_extension_backend_op(
    Backend::TPU,
    "index_add_(Tensor, int64_t, Tensor, Tensor) -> Tensor",
    &xla_index_add_);
register_extension_backend_op(
    Backend::TPU,
    "index_fill_(Tensor, int64_t, Tensor, Scalar) -> Tensor",
    &xla_index_fill_);
register_extension_backend_op(
    Backend::TPU,
    "index_fill_(Tensor, int64_t, Tensor, Tensor) -> Tensor",
    &xla_index_fill__1);
register_extension_backend_op(
    Backend::TPU,
    "scatter_(Tensor, int64_t, Tensor, Tensor) -> Tensor",
    &xla_scatter_);
register_extension_backend_op(
    Backend::TPU,
    "scatter_(Tensor, int64_t, Tensor, Scalar) -> Tensor",
    &xla_scatter__1);
register_extension_backend_op(
    Backend::TPU,
    "scatter_add_(Tensor, int64_t, Tensor, Tensor) -> Tensor",
    &xla_scatter_add_);
register_extension_backend_op(
    Backend::TPU,
    "lt_(Tensor, Scalar) -> Tensor",
    &xla_lt_);
register_extension_backend_op(
    Backend::TPU,
    "lt_(Tensor, Tensor) -> Tensor",
    &xla_lt__1);
register_extension_backend_op(
    Backend::TPU,
    "gt_(Tensor, Scalar) -> Tensor",
    &xla_gt_);
register_extension_backend_op(
    Backend::TPU,
    "gt_(Tensor, Tensor) -> Tensor",
    &xla_gt__1);
register_extension_backend_op(
    Backend::TPU,
    "le_(Tensor, Scalar) -> Tensor",
    &xla_le_);
register_extension_backend_op(
    Backend::TPU,
    "le_(Tensor, Tensor) -> Tensor",
    &xla_le__1);
register_extension_backend_op(
    Backend::TPU,
    "ge_(Tensor, Scalar) -> Tensor",
    &xla_ge_);
register_extension_backend_op(
    Backend::TPU,
    "ge_(Tensor, Tensor) -> Tensor",
    &xla_ge__1);
register_extension_backend_op(
    Backend::TPU,
    "eq_(Tensor, Scalar) -> Tensor",
    &xla_eq_);
register_extension_backend_op(
    Backend::TPU,
    "eq_(Tensor, Tensor) -> Tensor",
    &xla_eq__1);
register_extension_backend_op(
    Backend::TPU,
    "ne_(Tensor, Scalar) -> Tensor",
    &xla_ne_);
register_extension_backend_op(
    Backend::TPU,
    "ne_(Tensor, Tensor) -> Tensor",
    &xla_ne__1);
register_extension_backend_op(
    Backend::TPU,
    "__and__(Tensor, Scalar) -> Tensor",
    &xla___and__);
register_extension_backend_op(
    Backend::TPU,
    "__and__(Tensor, Tensor) -> Tensor",
    &xla___and___1);
register_extension_backend_op(
    Backend::TPU,
    "__iand__(Tensor, Scalar) -> Tensor",
    &xla___iand__);
register_extension_backend_op(
    Backend::TPU,
    "__iand__(Tensor, Tensor) -> Tensor",
    &xla___iand___1);
register_extension_backend_op(
    Backend::TPU,
    "__or__(Tensor, Scalar) -> Tensor",
    &xla___or__);
register_extension_backend_op(
    Backend::TPU,
    "__or__(Tensor, Tensor) -> Tensor",
    &xla___or___1);
register_extension_backend_op(
    Backend::TPU,
    "__ior__(Tensor, Scalar) -> Tensor",
    &xla___ior__);
register_extension_backend_op(
    Backend::TPU,
    "__ior__(Tensor, Tensor) -> Tensor",
    &xla___ior___1);
register_extension_backend_op(
    Backend::TPU,
    "__xor__(Tensor, Scalar) -> Tensor",
    &xla___xor__);
register_extension_backend_op(
    Backend::TPU,
    "__xor__(Tensor, Tensor) -> Tensor",
    &xla___xor___1);
register_extension_backend_op(
    Backend::TPU,
    "__ixor__(Tensor, Scalar) -> Tensor",
    &xla___ixor__);
register_extension_backend_op(
    Backend::TPU,
    "__ixor__(Tensor, Tensor) -> Tensor",
    &xla___ixor___1);
register_extension_backend_op(
    Backend::TPU,
    "__lshift__(Tensor, Scalar) -> Tensor",
    &xla___lshift__);
register_extension_backend_op(
    Backend::TPU,
    "__lshift__(Tensor, Tensor) -> Tensor",
    &xla___lshift___1);
register_extension_backend_op(
    Backend::TPU,
    "__ilshift__(Tensor, Scalar) -> Tensor",
    &xla___ilshift__);
register_extension_backend_op(
    Backend::TPU,
    "__ilshift__(Tensor, Tensor) -> Tensor",
    &xla___ilshift___1);
register_extension_backend_op(
    Backend::TPU,
    "__rshift__(Tensor, Scalar) -> Tensor",
    &xla___rshift__);
register_extension_backend_op(
    Backend::TPU,
    "__rshift__(Tensor, Tensor) -> Tensor",
    &xla___rshift___1);
register_extension_backend_op(
    Backend::TPU,
    "__irshift__(Tensor, Scalar) -> Tensor",
    &xla___irshift__);
register_extension_backend_op(
    Backend::TPU,
    "__irshift__(Tensor, Tensor) -> Tensor",
    &xla___irshift___1);
register_extension_backend_op(
    Backend::TPU,
    "lgamma_(Tensor) -> Tensor",
    &xla_lgamma_);
register_extension_backend_op(
    Backend::TPU,
    "atan2_(Tensor, Tensor) -> Tensor",
    &xla_atan2_);
register_extension_backend_op(
    Backend::TPU,
    "tril_(Tensor, int64_t) -> Tensor",
    &xla_tril_);
register_extension_backend_op(
    Backend::TPU,
    "triu_(Tensor, int64_t) -> Tensor",
    &xla_triu_);
register_extension_backend_op(
    Backend::TPU,
    "digamma_(Tensor) -> Tensor",
    &xla_digamma_);
register_extension_backend_op(
    Backend::TPU,
    "polygamma_(Tensor, int64_t) -> Tensor",
    &xla_polygamma_);
register_extension_backend_op(
    Backend::TPU,
    "erfinv_(Tensor) -> Tensor",
    &xla_erfinv_);
register_extension_backend_op(
    Backend::TPU,
    "frac_(Tensor) -> Tensor",
    &xla_frac_);
register_extension_backend_op(
    Backend::TPU,
    "renorm_(Tensor, Scalar, int64_t, Scalar) -> Tensor",
    &xla_renorm_);
register_extension_backend_op(
    Backend::TPU,
    "reciprocal_(Tensor) -> Tensor",
    &xla_reciprocal_);
register_extension_backend_op(
    Backend::TPU,
    "neg_(Tensor) -> Tensor",
    &xla_neg_);
register_extension_backend_op(
    Backend::TPU,
    "pow_(Tensor, Scalar) -> Tensor",
    &xla_pow_);
register_extension_backend_op(
    Backend::TPU,
    "pow_(Tensor, Tensor) -> Tensor",
    &xla_pow__1);
register_extension_backend_op(
    Backend::TPU,
    "lerp_(Tensor, Tensor, Scalar) -> Tensor",
    &xla_lerp_);
register_extension_backend_op(
    Backend::TPU,
    "sign_(Tensor) -> Tensor",
    &xla_sign_);
register_extension_backend_op(
    Backend::TPU,
    "fmod_(Tensor, Scalar) -> Tensor",
    &xla_fmod_);
register_extension_backend_op(
    Backend::TPU,
    "fmod_(Tensor, Tensor) -> Tensor",
    &xla_fmod__1);
register_extension_backend_op(
    Backend::TPU,
    "remainder_(Tensor, Scalar) -> Tensor",
    &xla_remainder_);
register_extension_backend_op(
    Backend::TPU,
    "remainder_(Tensor, Tensor) -> Tensor",
    &xla_remainder__1);
register_extension_backend_op(
    Backend::TPU,
    "addbmm_(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addbmm_);
register_extension_backend_op(
    Backend::TPU,
    "addbmm_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addbmm_out);
register_extension_backend_op(
    Backend::TPU,
    "addbmm(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
    &xla_addbmm);
register_extension_backend_op(
    Backend::TPU,
    "addcmul_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcmul_);
register_extension_backend_op(
    Backend::TPU,
    "addcdiv_(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcdiv_);
register_extension_backend_op(
    Backend::TPU,
    "random_(Tensor, int64_t, int64_t, Generator) -> Tensor",
    &xla_random_);
register_extension_backend_op(
    Backend::TPU,
    "random_(Tensor, int64_t, Generator) -> Tensor",
    &xla_random__1);
register_extension_backend_op(
    Backend::TPU,
    "random_(Tensor, Generator) -> Tensor",
    &xla_random__2);
register_extension_backend_op(
    Backend::TPU,
    "uniform_(Tensor, double, double, Generator) -> Tensor",
    &xla_uniform_);
register_extension_backend_op(
    Backend::TPU,
    "normal_(Tensor, double, double, Generator) -> Tensor",
    &xla_normal_);
register_extension_backend_op(
    Backend::TPU,
    "cauchy_(Tensor, double, double, Generator) -> Tensor",
    &xla_cauchy_);
register_extension_backend_op(
    Backend::TPU,
    "log_normal_(Tensor, double, double, Generator) -> Tensor",
    &xla_log_normal_);
register_extension_backend_op(
    Backend::TPU,
    "exponential_(Tensor, double, Generator) -> Tensor",
    &xla_exponential_);
register_extension_backend_op(
    Backend::TPU,
    "geometric_(Tensor, double, Generator) -> Tensor",
    &xla_geometric_);
register_extension_backend_op(
    Backend::TPU,
    "diag_out(Tensor, Tensor, int64_t) -> Tensor",
    &xla_diag_out);
register_extension_backend_op(
    Backend::TPU,
    "diag(Tensor, int64_t) -> Tensor",
    &xla_diag);
register_extension_backend_op(
    Backend::TPU,
    "cross_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
    &xla_cross_out);
register_extension_backend_op(
    Backend::TPU,
    "cross(Tensor, Tensor, int64_t) -> Tensor",
    &xla_cross);
register_extension_backend_op(
    Backend::TPU,
    "triu_out(Tensor, Tensor, int64_t) -> Tensor",
    &xla_triu_out);
register_extension_backend_op(
    Backend::TPU,
    "triu(Tensor, int64_t) -> Tensor",
    &xla_triu);
register_extension_backend_op(
    Backend::TPU,
    "tril_out(Tensor, Tensor, int64_t) -> Tensor",
    &xla_tril_out);
register_extension_backend_op(
    Backend::TPU,
    "tril(Tensor, int64_t) -> Tensor",
    &xla_tril);
register_extension_backend_op(
    Backend::TPU,
    "tril_indices(int64_t, int64_t, int64_t, TensorOptions) -> Tensor",
    &xla_tril_indices);
register_extension_backend_op(
    Backend::TPU,
    "triu_indices(int64_t, int64_t, int64_t, TensorOptions) -> Tensor",
    &xla_triu_indices);
register_extension_backend_op(
    Backend::TPU,
    "trace(Tensor) -> Tensor",
    &xla_trace);
register_extension_backend_op(
    Backend::TPU,
    "ne_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_ne_out);
register_extension_backend_op(
    Backend::TPU,
    "ne(Tensor, Scalar) -> Tensor",
    &xla_ne);
register_extension_backend_op(
    Backend::TPU,
    "ne_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_ne_out_1);
register_extension_backend_op(
    Backend::TPU,
    "ne(Tensor, Tensor) -> Tensor",
    &xla_ne_1);
register_extension_backend_op(
    Backend::TPU,
    "eq_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_eq_out);
register_extension_backend_op(
    Backend::TPU,
    "eq(Tensor, Scalar) -> Tensor",
    &xla_eq);
register_extension_backend_op(
    Backend::TPU,
    "eq_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_eq_out_1);
register_extension_backend_op(
    Backend::TPU,
    "eq(Tensor, Tensor) -> Tensor",
    &xla_eq_1);
register_extension_backend_op(
    Backend::TPU,
    "ge_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_ge_out);
register_extension_backend_op(
    Backend::TPU,
    "ge(Tensor, Scalar) -> Tensor",
    &xla_ge);
register_extension_backend_op(
    Backend::TPU,
    "ge_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_ge_out_1);
register_extension_backend_op(
    Backend::TPU,
    "ge(Tensor, Tensor) -> Tensor",
    &xla_ge_1);
register_extension_backend_op(
    Backend::TPU,
    "le_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_le_out);
register_extension_backend_op(
    Backend::TPU,
    "le(Tensor, Scalar) -> Tensor",
    &xla_le);
register_extension_backend_op(
    Backend::TPU,
    "le_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_le_out_1);
register_extension_backend_op(
    Backend::TPU,
    "le(Tensor, Tensor) -> Tensor",
    &xla_le_1);
register_extension_backend_op(
    Backend::TPU,
    "gt_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_gt_out);
register_extension_backend_op(
    Backend::TPU,
    "gt(Tensor, Scalar) -> Tensor",
    &xla_gt);
register_extension_backend_op(
    Backend::TPU,
    "gt_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_gt_out_1);
register_extension_backend_op(
    Backend::TPU,
    "gt(Tensor, Tensor) -> Tensor",
    &xla_gt_1);
register_extension_backend_op(
    Backend::TPU,
    "lt_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_lt_out);
register_extension_backend_op(
    Backend::TPU,
    "lt(Tensor, Scalar) -> Tensor",
    &xla_lt);
register_extension_backend_op(
    Backend::TPU,
    "lt_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_lt_out_1);
register_extension_backend_op(
    Backend::TPU,
    "lt(Tensor, Tensor) -> Tensor",
    &xla_lt_1);
register_extension_backend_op(
    Backend::TPU,
    "take_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_take_out);
register_extension_backend_op(
    Backend::TPU,
    "take(Tensor, Tensor) -> Tensor",
    &xla_take);
register_extension_backend_op(
    Backend::TPU,
    "index_select_out(Tensor, Tensor, int64_t, Tensor) -> Tensor",
    &xla_index_select_out);
register_extension_backend_op(
    Backend::TPU,
    "index_select(Tensor, int64_t, Tensor) -> Tensor",
    &xla_index_select);
register_extension_backend_op(
    Backend::TPU,
    "masked_select_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_masked_select_out);
register_extension_backend_op(
    Backend::TPU,
    "masked_select(Tensor, Tensor) -> Tensor",
    &xla_masked_select);
register_extension_backend_op(
    Backend::TPU,
    "nonzero_out(Tensor, Tensor) -> Tensor",
    &xla_nonzero_out);
register_extension_backend_op(
    Backend::TPU,
    "nonzero(Tensor) -> Tensor",
    &xla_nonzero);
register_extension_backend_op(
    Backend::TPU,
    "gather_out(Tensor, Tensor, int64_t, Tensor) -> Tensor",
    &xla_gather_out);
register_extension_backend_op(
    Backend::TPU,
    "gather(Tensor, int64_t, Tensor) -> Tensor",
    &xla_gather);
register_extension_backend_op(
    Backend::TPU,
    "addcmul_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcmul_out);
register_extension_backend_op(
    Backend::TPU,
    "addcmul(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcmul);
register_extension_backend_op(
    Backend::TPU,
    "addcdiv_out(Tensor, Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcdiv_out);
register_extension_backend_op(
    Backend::TPU,
    "addcdiv(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_addcdiv);
register_extension_backend_op(
    Backend::TPU,
    "gels_out(Tensor, Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_gels_out);
register_extension_backend_op(
    Backend::TPU,
    "gels(Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_gels);
register_extension_backend_op(
    Backend::TPU,
    "trtrs_out(Tensor, Tensor, Tensor, Tensor, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_trtrs_out);
register_extension_backend_op(
    Backend::TPU,
    "trtrs(Tensor, Tensor, bool, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_trtrs);
register_extension_backend_op(
    Backend::TPU,
    "symeig_out(Tensor, Tensor, Tensor, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_symeig_out);
register_extension_backend_op(
    Backend::TPU,
    "symeig(Tensor, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_symeig);
register_extension_backend_op(
    Backend::TPU,
    "eig_out(Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor>",
    &xla_eig_out);
register_extension_backend_op(
    Backend::TPU,
    "eig(Tensor, bool) -> std::tuple<Tensor,Tensor>",
    &xla_eig);
register_extension_backend_op(
    Backend::TPU,
    "svd_out(Tensor, Tensor, Tensor, Tensor, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_svd_out);
register_extension_backend_op(
    Backend::TPU,
    "svd(Tensor, bool, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_svd);
register_extension_backend_op(
    Backend::TPU,
    "cholesky_out(Tensor, Tensor, bool) -> Tensor",
    &xla_cholesky_out);
register_extension_backend_op(
    Backend::TPU,
    "cholesky(Tensor, bool) -> Tensor",
    &xla_cholesky);
register_extension_backend_op(
    Backend::TPU,
    "_cholesky_helper(Tensor, bool) -> Tensor",
    &xla__cholesky_helper);
register_extension_backend_op(
    Backend::TPU,
    "cholesky_solve_out(Tensor, Tensor, Tensor, bool) -> Tensor",
    &xla_cholesky_solve_out);
register_extension_backend_op(
    Backend::TPU,
    "cholesky_solve(Tensor, Tensor, bool) -> Tensor",
    &xla_cholesky_solve);
register_extension_backend_op(
    Backend::TPU,
    "_cholesky_solve_helper(Tensor, Tensor, bool) -> Tensor",
    &xla__cholesky_solve_helper);
register_extension_backend_op(
    Backend::TPU,
    "potri_out(Tensor, Tensor, bool) -> Tensor",
    &xla_potri_out);
register_extension_backend_op(
    Backend::TPU,
    "potri(Tensor, bool) -> Tensor",
    &xla_potri);
register_extension_backend_op(
    Backend::TPU,
    "pstrf_out(Tensor, Tensor, Tensor, bool, Scalar) -> std::tuple<Tensor,Tensor>",
    &xla_pstrf_out);
register_extension_backend_op(
    Backend::TPU,
    "pstrf(Tensor, bool, Scalar) -> std::tuple<Tensor,Tensor>",
    &xla_pstrf);
register_extension_backend_op(
    Backend::TPU,
    "qr_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_qr_out);
register_extension_backend_op(
    Backend::TPU,
    "qr(Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_qr);
register_extension_backend_op(
    Backend::TPU,
    "geqrf_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_geqrf_out);
register_extension_backend_op(
    Backend::TPU,
    "geqrf(Tensor) -> std::tuple<Tensor,Tensor>",
    &xla_geqrf);
register_extension_backend_op(
    Backend::TPU,
    "orgqr_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_orgqr_out);
register_extension_backend_op(
    Backend::TPU,
    "orgqr(Tensor, Tensor) -> Tensor",
    &xla_orgqr);
register_extension_backend_op(
    Backend::TPU,
    "ormqr_out(Tensor, Tensor, Tensor, Tensor, bool, bool) -> Tensor",
    &xla_ormqr_out);
register_extension_backend_op(
    Backend::TPU,
    "ormqr(Tensor, Tensor, Tensor, bool, bool) -> Tensor",
    &xla_ormqr);
register_extension_backend_op(
    Backend::TPU,
    "btrifact_out(Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor>",
    &xla_btrifact_out);
register_extension_backend_op(
    Backend::TPU,
    "btrifact(Tensor, bool) -> std::tuple<Tensor,Tensor>",
    &xla_btrifact);
register_extension_backend_op(
    Backend::TPU,
    "btrifact_with_info_out(Tensor, Tensor, Tensor, Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_btrifact_with_info_out);
register_extension_backend_op(
    Backend::TPU,
    "btrifact_with_info(Tensor, bool) -> std::tuple<Tensor,Tensor,Tensor>",
    &xla_btrifact_with_info);
register_extension_backend_op(
    Backend::TPU,
    "btrisolve_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla_btrisolve_out);
register_extension_backend_op(
    Backend::TPU,
    "btrisolve(Tensor, Tensor, Tensor) -> Tensor",
    &xla_btrisolve);
register_extension_backend_op(
    Backend::TPU,
    "multinomial_out(Tensor, Tensor, int64_t, bool, Generator) -> Tensor",
    &xla_multinomial_out);
register_extension_backend_op(
    Backend::TPU,
    "multinomial(Tensor, int64_t, bool, Generator) -> Tensor",
    &xla_multinomial);
register_extension_backend_op(
    Backend::TPU,
    "lgamma_out(Tensor, Tensor) -> Tensor",
    &xla_lgamma_out);
register_extension_backend_op(
    Backend::TPU,
    "lgamma(Tensor) -> Tensor",
    &xla_lgamma);
register_extension_backend_op(
    Backend::TPU,
    "digamma_out(Tensor, Tensor) -> Tensor",
    &xla_digamma_out);
register_extension_backend_op(
    Backend::TPU,
    "digamma(Tensor) -> Tensor",
    &xla_digamma);
register_extension_backend_op(
    Backend::TPU,
    "polygamma_out(Tensor, int64_t, Tensor) -> Tensor",
    &xla_polygamma_out);
register_extension_backend_op(
    Backend::TPU,
    "polygamma(int64_t, Tensor) -> Tensor",
    &xla_polygamma);
register_extension_backend_op(
    Backend::TPU,
    "erfinv_out(Tensor, Tensor) -> Tensor",
    &xla_erfinv_out);
register_extension_backend_op(
    Backend::TPU,
    "erfinv(Tensor) -> Tensor",
    &xla_erfinv);
register_extension_backend_op(
    Backend::TPU,
    "frac_out(Tensor, Tensor) -> Tensor",
    &xla_frac_out);
register_extension_backend_op(
    Backend::TPU,
    "frac(Tensor) -> Tensor",
    &xla_frac);
register_extension_backend_op(
    Backend::TPU,
    "dist(Tensor, Tensor, Scalar) -> Tensor",
    &xla_dist);
register_extension_backend_op(
    Backend::TPU,
    "reciprocal_out(Tensor, Tensor) -> Tensor",
    &xla_reciprocal_out);
register_extension_backend_op(
    Backend::TPU,
    "reciprocal(Tensor) -> Tensor",
    &xla_reciprocal);
register_extension_backend_op(
    Backend::TPU,
    "neg_out(Tensor, Tensor) -> Tensor",
    &xla_neg_out);
register_extension_backend_op(
    Backend::TPU,
    "neg(Tensor) -> Tensor",
    &xla_neg);
register_extension_backend_op(
    Backend::TPU,
    "atan2_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_atan2_out);
register_extension_backend_op(
    Backend::TPU,
    "atan2(Tensor, Tensor) -> Tensor",
    &xla_atan2);
register_extension_backend_op(
    Backend::TPU,
    "lerp_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
    &xla_lerp_out);
register_extension_backend_op(
    Backend::TPU,
    "lerp(Tensor, Tensor, Scalar) -> Tensor",
    &xla_lerp);
register_extension_backend_op(
    Backend::TPU,
    "histc_out(Tensor, Tensor, int64_t, Scalar, Scalar) -> Tensor",
    &xla_histc_out);
register_extension_backend_op(
    Backend::TPU,
    "histc(Tensor, int64_t, Scalar, Scalar) -> Tensor",
    &xla_histc);
register_extension_backend_op(
    Backend::TPU,
    "sign_out(Tensor, Tensor) -> Tensor",
    &xla_sign_out);
register_extension_backend_op(
    Backend::TPU,
    "sign(Tensor) -> Tensor",
    &xla_sign);
register_extension_backend_op(
    Backend::TPU,
    "fmod_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_fmod_out);
register_extension_backend_op(
    Backend::TPU,
    "fmod(Tensor, Scalar) -> Tensor",
    &xla_fmod);
register_extension_backend_op(
    Backend::TPU,
    "fmod_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_fmod_out_1);
register_extension_backend_op(
    Backend::TPU,
    "fmod(Tensor, Tensor) -> Tensor",
    &xla_fmod_1);
register_extension_backend_op(
    Backend::TPU,
    "remainder_out(Tensor, Tensor, Scalar) -> Tensor",
    &xla_remainder_out);
register_extension_backend_op(
    Backend::TPU,
    "remainder(Tensor, Scalar) -> Tensor",
    &xla_remainder);
register_extension_backend_op(
    Backend::TPU,
    "remainder_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_remainder_out_1);
register_extension_backend_op(
    Backend::TPU,
    "remainder(Tensor, Tensor) -> Tensor",
    &xla_remainder_1);
register_extension_backend_op(
    Backend::TPU,
    "min_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_min_out_1);
register_extension_backend_op(
    Backend::TPU,
    "min(Tensor, Tensor) -> Tensor",
    &xla_min_1);
register_extension_backend_op(
    Backend::TPU,
    "min(Tensor) -> Tensor",
    &xla_min_2);
register_extension_backend_op(
    Backend::TPU,
    "max_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_max_out_1);
register_extension_backend_op(
    Backend::TPU,
    "max(Tensor, Tensor) -> Tensor",
    &xla_max_1);
register_extension_backend_op(
    Backend::TPU,
    "max(Tensor) -> Tensor",
    &xla_max_2);
register_extension_backend_op(
    Backend::TPU,
    "median(Tensor) -> Tensor",
    &xla_median_1);
register_extension_backend_op(
    Backend::TPU,
    "sort_out(Tensor, Tensor, Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_sort_out);
register_extension_backend_op(
    Backend::TPU,
    "sort(Tensor, int64_t, bool) -> std::tuple<Tensor,Tensor>",
    &xla_sort);
register_extension_backend_op(
    Backend::TPU,
    "topk_out(Tensor, Tensor, Tensor, int64_t, int64_t, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_topk_out);
register_extension_backend_op(
    Backend::TPU,
    "topk(Tensor, int64_t, int64_t, bool, bool) -> std::tuple<Tensor,Tensor>",
    &xla_topk);
register_extension_backend_op(
    Backend::TPU,
    "all(Tensor) -> Tensor",
    &xla_all_1);
register_extension_backend_op(
    Backend::TPU,
    "any(Tensor) -> Tensor",
    &xla_any_1);
register_extension_backend_op(
    Backend::TPU,
    "renorm_out(Tensor, Tensor, Scalar, int64_t, Scalar) -> Tensor",
    &xla_renorm_out);
register_extension_backend_op(
    Backend::TPU,
    "renorm(Tensor, Scalar, int64_t, Scalar) -> Tensor",
    &xla_renorm);
register_extension_backend_op(
    Backend::TPU,
    "unfold(Tensor, int64_t, int64_t, int64_t) -> Tensor",
    &xla_unfold);
register_extension_backend_op(
    Backend::TPU,
    "equal(Tensor, Tensor) -> bool",
    &xla_equal);
register_extension_backend_op(
    Backend::TPU,
    "pow_out(Tensor, Tensor, Tensor) -> Tensor",
    &xla_pow_out_1);
register_extension_backend_op(
    Backend::TPU,
    "pow(Tensor, Tensor) -> Tensor",
    &xla_pow_1);
register_extension_backend_op(
    Backend::TPU,
    "pow_out(Tensor, Scalar, Tensor) -> Tensor",
    &xla_pow_out_2);
register_extension_backend_op(
    Backend::TPU,
    "pow(Scalar, Tensor) -> Tensor",
    &xla_pow_2);
register_extension_backend_op(
    Backend::TPU,
    "normal_out(Tensor, Tensor, double, Generator) -> Tensor",
    &xla_normal_out);
register_extension_backend_op(
    Backend::TPU,
    "normal(Tensor, double, Generator) -> Tensor",
    &xla_normal);
register_extension_backend_op(
    Backend::TPU,
    "normal_out(Tensor, double, Tensor, Generator) -> Tensor",
    &xla_normal_out_1);
register_extension_backend_op(
    Backend::TPU,
    "normal(double, Tensor, Generator) -> Tensor",
    &xla_normal_1);
register_extension_backend_op(
    Backend::TPU,
    "normal_out(Tensor, Tensor, Tensor, Generator) -> Tensor",
    &xla_normal_out_2);
register_extension_backend_op(
    Backend::TPU,
    "normal(Tensor, Tensor, Generator) -> Tensor",
    &xla_normal_2);
register_extension_backend_op(
    Backend::TPU,
    "alias(Tensor) -> Tensor",
    &xla_alias);
register_extension_backend_op(
    Backend::TPU,
    "_dirichlet_grad_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
    &xla__dirichlet_grad_out);
register_extension_backend_op(
    Backend::TPU,
    "_dirichlet_grad(Tensor, Tensor, Tensor) -> Tensor",
    &xla__dirichlet_grad);
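// THNN loss functions: each loss registers the functional form, its _out
// variant, and the matching _forward/_backward entry points.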
  register_extension_backend_op(
      Backend::TPU,
      "binary_cross_entropy_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_binary_cross_entropy_out);
  register_extension_backend_op(
      Backend::TPU,
      "binary_cross_entropy(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_binary_cross_entropy);
  register_extension_backend_op(
      Backend::TPU,
      "binary_cross_entropy_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_binary_cross_entropy_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "binary_cross_entropy_backward(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_binary_cross_entropy_backward);
  register_extension_backend_op(
      Backend::TPU,
      "mse_loss_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_mse_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "mse_loss(Tensor, Tensor, int64_t) -> Tensor",
      &xla_mse_loss);
  register_extension_backend_op(
      Backend::TPU,
      "mse_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_mse_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "mse_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_mse_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "l1_loss_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_l1_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "l1_loss(Tensor, Tensor, int64_t) -> Tensor",
      &xla_l1_loss);
  register_extension_backend_op(
      Backend::TPU,
      "l1_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_l1_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "l1_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_l1_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "multi_margin_loss_out(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla_multi_margin_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "multi_margin_loss(Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla_multi_margin_loss);
  register_extension_backend_op(
      Backend::TPU,
      "multi_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla_multi_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "multi_margin_loss_backward(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor, int64_t) -> Tensor",
      &xla_multi_margin_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_multilabel_margin_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss(Tensor, Tensor, int64_t) -> Tensor",
      &xla_multilabel_margin_loss);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss_forward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_multilabel_margin_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss_forward(Tensor, Tensor, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_multilabel_margin_loss_forward);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t, Tensor) -> Tensor",
      &xla_multilabel_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "multilabel_margin_loss_backward(Tensor, Tensor, Tensor, int64_t, Tensor) -> Tensor",
      &xla_multilabel_margin_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss_out(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> Tensor",
      &xla_nll_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss(Tensor, Tensor, Tensor, int64_t, int64_t) -> Tensor",
      &xla_nll_loss);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_nll_loss_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss_forward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_nll_loss_forward);
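  // NOTE (editorial annotation): ops are registered in families -- the
  // user-facing op, an "_out" variant writing into a caller-provided result
  // tensor, and "_forward"/"_backward" pairs. The forward variants that
  // return std::tuple<Tensor,Tensor> (e.g. nll_loss_forward) carry a second
  // tensor alongside the output -- in stock ATen this is a saved buffer such
  // as total_weight that the corresponding backward consumes; the exact
  // contract here is defined by the generated wrappers, not by this table.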
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla_nll_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss_backward(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla_nll_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d_out(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> Tensor",
      &xla_nll_loss2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d(Tensor, Tensor, Tensor, int64_t, int64_t) -> Tensor",
      &xla_nll_loss2d);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_nll_loss2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d_forward(Tensor, Tensor, Tensor, int64_t, int64_t) -> std::tuple<Tensor,Tensor>",
      &xla_nll_loss2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla_nll_loss2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "nll_loss2d_backward(Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, Tensor) -> Tensor",
      &xla_nll_loss2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "smooth_l1_loss_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_smooth_l1_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "smooth_l1_loss(Tensor, Tensor, int64_t) -> Tensor",
      &xla_smooth_l1_loss);
  register_extension_backend_op(
      Backend::TPU,
      "smooth_l1_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_smooth_l1_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "smooth_l1_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_smooth_l1_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "soft_margin_loss_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_soft_margin_loss_out);
  register_extension_backend_op(
      Backend::TPU,
      "soft_margin_loss(Tensor, Tensor, int64_t) -> Tensor",
      &xla_soft_margin_loss);
  register_extension_backend_op(
      Backend::TPU,
      "soft_margin_loss_backward_out(Tensor, Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_soft_margin_loss_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "soft_margin_loss_backward(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_soft_margin_loss_backward);
  register_extension_backend_op(
      Backend::TPU,
      "elu_out(Tensor, Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla_elu_out);
  register_extension_backend_op(
      Backend::TPU,
      "elu(Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla_elu);
  register_extension_backend_op(
      Backend::TPU,
      "elu_backward_out(Tensor, Tensor, Scalar, Scalar, Scalar, Tensor) -> Tensor",
      &xla_elu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "elu_backward(Tensor, Scalar, Scalar, Scalar, Tensor) -> Tensor",
      &xla_elu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "elu_(Tensor, Scalar, Scalar, Scalar) -> Tensor",
      &xla_elu_);
  register_extension_backend_op(
      Backend::TPU,
      "glu_out(Tensor, Tensor, int64_t) -> Tensor",
      &xla_glu_out);
  register_extension_backend_op(
      Backend::TPU,
      "glu(Tensor, int64_t) -> Tensor",
      &xla_glu);
  register_extension_backend_op(
      Backend::TPU,
      "glu_backward_out(Tensor, Tensor, Tensor, int64_t) -> Tensor",
      &xla_glu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "glu_backward(Tensor, Tensor, int64_t) -> Tensor",
      &xla_glu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "hardtanh_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_hardtanh_out);
  register_extension_backend_op(
      Backend::TPU,
      "hardtanh(Tensor, Scalar, Scalar) -> Tensor",
      &xla_hardtanh);
  register_extension_backend_op(
      Backend::TPU,
      "hardtanh_backward_out(Tensor, Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_hardtanh_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "hardtanh_backward(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_hardtanh_backward);
  register_extension_backend_op(
      Backend::TPU,
      "hardtanh_(Tensor, Scalar, Scalar) -> Tensor",
      &xla_hardtanh_);
  register_extension_backend_op(
      Backend::TPU,
      "leaky_relu_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla_leaky_relu_out);
  register_extension_backend_op(
      Backend::TPU,
      "leaky_relu(Tensor, Scalar) -> Tensor",
      &xla_leaky_relu);
  register_extension_backend_op(
      Backend::TPU,
      "leaky_relu_backward_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_leaky_relu_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "leaky_relu_backward(Tensor, Tensor, Scalar) -> Tensor",
      &xla_leaky_relu_backward);
  register_extension_backend_op(
      Backend::TPU,
      "leaky_relu_(Tensor, Scalar) -> Tensor",
      &xla_leaky_relu_);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid_out(Tensor, Tensor) -> Tensor",
      &xla_log_sigmoid_out);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid(Tensor) -> Tensor",
      &xla_log_sigmoid);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid_forward_out(Tensor, Tensor, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_log_sigmoid_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid_forward(Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_log_sigmoid_forward);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla_log_sigmoid_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "log_sigmoid_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla_log_sigmoid_backward);
  register_extension_backend_op(
      Backend::TPU,
      "rrelu_with_noise_out(Tensor, Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla_rrelu_with_noise_out);
  register_extension_backend_op(
      Backend::TPU,
      "rrelu_with_noise(Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla_rrelu_with_noise);
  register_extension_backend_op(
      Backend::TPU,
      "rrelu_with_noise_backward_out(Tensor, Tensor, Tensor, Tensor, Scalar, Scalar, bool) -> Tensor",
      &xla_rrelu_with_noise_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "rrelu_with_noise_backward(Tensor, Tensor, Tensor, Scalar, Scalar, bool) -> Tensor",
      &xla_rrelu_with_noise_backward);
  register_extension_backend_op(
      Backend::TPU,
      "rrelu_with_noise_(Tensor, Tensor, Scalar, Scalar, bool, Generator) -> Tensor",
      &xla_rrelu_with_noise_);
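  // NOTE (editorial annotation): trailing-underscore entries (elu_,
  // hardtanh_, leaky_relu_, rrelu_with_noise_) are ATen's in-place variants.
  // By convention they mutate self and return it, so the registered wrapper
  // is expected to hand back the same Tensor reference rather than a fresh
  // result.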
  register_extension_backend_op(
      Backend::TPU,
      "softplus_out(Tensor, Tensor, Scalar, Scalar) -> Tensor",
      &xla_softplus_out);
  register_extension_backend_op(
      Backend::TPU,
      "softplus(Tensor, Scalar, Scalar) -> Tensor",
      &xla_softplus);
  register_extension_backend_op(
      Backend::TPU,
      "softplus_backward_out(Tensor, Tensor, Tensor, Scalar, Scalar, Tensor) -> Tensor",
      &xla_softplus_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "softplus_backward(Tensor, Tensor, Scalar, Scalar, Tensor) -> Tensor",
      &xla_softplus_backward);
  register_extension_backend_op(
      Backend::TPU,
      "softshrink_out(Tensor, Tensor, Scalar) -> Tensor",
      &xla_softshrink_out);
  register_extension_backend_op(
      Backend::TPU,
      "softshrink(Tensor, Scalar) -> Tensor",
      &xla_softshrink);
  register_extension_backend_op(
      Backend::TPU,
      "softshrink_backward_out(Tensor, Tensor, Tensor, Scalar) -> Tensor",
      &xla_softshrink_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "softshrink_backward(Tensor, Tensor, Scalar) -> Tensor",
      &xla_softshrink_backward);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool2d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_adaptive_avg_pool2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool2d(Tensor, IntList) -> Tensor",
      &xla_adaptive_avg_pool2d);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool2d_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_avg_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool2d_backward(Tensor, Tensor) -> Tensor",
      &xla_adaptive_avg_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool3d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_adaptive_avg_pool3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool3d(Tensor, IntList) -> Tensor",
      &xla_adaptive_avg_pool3d);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool3d_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_avg_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_avg_pool3d_backward(Tensor, Tensor) -> Tensor",
      &xla_adaptive_avg_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool2d_out(Tensor, Tensor, Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla_adaptive_max_pool2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool2d(Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla_adaptive_max_pool2d);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool2d_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_max_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool2d_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_max_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool3d_out(Tensor, Tensor, Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla_adaptive_max_pool3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool3d(Tensor, IntList) -> std::tuple<Tensor,Tensor>",
      &xla_adaptive_max_pool3d);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool3d_backward_out(Tensor, Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_max_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "adaptive_max_pool3d_backward(Tensor, Tensor, Tensor) -> Tensor",
      &xla_adaptive_max_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool2d_out(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool2d(Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool2d);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool2d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool2d_backward(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool3d_out(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool3d(Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool3d);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool3d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "avg_pool3d_backward(Tensor, Tensor, IntList, IntList, IntList, bool, bool) -> Tensor",
      &xla_avg_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool2d_out(Tensor, Tensor, Tensor, IntList, IntList, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_fractional_max_pool2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool2d(Tensor, IntList, IntList, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_fractional_max_pool2d);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool2d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, Tensor) -> Tensor",
      &xla_fractional_max_pool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool2d_backward(Tensor, Tensor, IntList, IntList, Tensor) -> Tensor",
      &xla_fractional_max_pool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool3d_out(Tensor, Tensor, Tensor, IntList, IntList, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_fractional_max_pool3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool3d(Tensor, IntList, IntList, Tensor) -> std::tuple<Tensor,Tensor>",
      &xla_fractional_max_pool3d);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool3d_backward_out(Tensor, Tensor, Tensor, IntList, IntList, Tensor) -> Tensor",
      &xla_fractional_max_pool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "fractional_max_pool3d_backward(Tensor, Tensor, IntList, IntList, Tensor) -> Tensor",
      &xla_fractional_max_pool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool2d_with_indices_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla_max_pool2d_with_indices_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool2d_with_indices(Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla_max_pool2d_with_indices);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool2d_with_indices_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla_max_pool2d_with_indices_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool2d_with_indices_backward(Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla_max_pool2d_with_indices_backward);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool3d_with_indices_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla_max_pool3d_with_indices_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool3d_with_indices(Tensor, IntList, IntList, IntList, IntList, bool) -> std::tuple<Tensor,Tensor>",
      &xla_max_pool3d_with_indices);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool3d_with_indices_backward_out(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla_max_pool3d_with_indices_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_pool3d_with_indices_backward(Tensor, Tensor, IntList, IntList, IntList, IntList, bool, Tensor) -> Tensor",
      &xla_max_pool3d_with_indices_backward);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool2d_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_max_unpool2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool2d(Tensor, Tensor, IntList) -> Tensor",
      &xla_max_unpool2d);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool2d_backward_out(Tensor, Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_max_unpool2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool2d_backward(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_max_unpool2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool3d_out(Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_max_unpool3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool3d(Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_max_unpool3d);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool3d_backward_out(Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_max_unpool3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "max_unpool3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_max_unpool3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad1d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad1d_out);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad1d(Tensor, IntList) -> Tensor",
      &xla_reflection_pad1d);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad1d_backward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad1d_backward(Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad2d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad2d(Tensor, IntList) -> Tensor",
      &xla_reflection_pad2d);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad2d_backward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "reflection_pad2d_backward(Tensor, Tensor, IntList) -> Tensor",
      &xla_reflection_pad2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad1d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad1d_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad1d(Tensor, IntList) -> Tensor",
      &xla_replication_pad1d);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad1d_backward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad1d_backward(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad2d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad2d(Tensor, IntList) -> Tensor",
      &xla_replication_pad2d);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad2d_backward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad2d_backward(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad3d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad3d(Tensor, IntList) -> Tensor",
      &xla_replication_pad3d);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad3d_backward_out(Tensor, Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "replication_pad3d_backward(Tensor, Tensor, IntList) -> Tensor",
      &xla_replication_pad3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_linear1d_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla_upsample_linear1d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_linear1d(Tensor, IntList, bool) -> Tensor",
      &xla_upsample_linear1d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_linear1d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_linear1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_linear1d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_linear1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bilinear2d_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla_upsample_bilinear2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bilinear2d(Tensor, IntList, bool) -> Tensor",
      &xla_upsample_bilinear2d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bilinear2d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_bilinear2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bilinear2d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_bilinear2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bicubic2d_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla_upsample_bicubic2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bicubic2d(Tensor, IntList, bool) -> Tensor",
      &xla_upsample_bicubic2d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bicubic2d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_bicubic2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_bicubic2d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_bicubic2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_trilinear3d_out(Tensor, Tensor, IntList, bool) -> Tensor",
      &xla_upsample_trilinear3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_trilinear3d(Tensor, IntList, bool) -> Tensor",
      &xla_upsample_trilinear3d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_trilinear3d_backward_out(Tensor, Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_trilinear3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_trilinear3d_backward(Tensor, IntList, IntList, bool) -> Tensor",
      &xla_upsample_trilinear3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest1d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_upsample_nearest1d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest1d(Tensor, IntList) -> Tensor",
      &xla_upsample_nearest1d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest1d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest1d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest1d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest1d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest2d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_upsample_nearest2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest2d(Tensor, IntList) -> Tensor",
      &xla_upsample_nearest2d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest2d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest2d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest3d_out(Tensor, Tensor, IntList) -> Tensor",
      &xla_upsample_nearest3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest3d(Tensor, IntList) -> Tensor",
      &xla_upsample_nearest3d);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest3d_backward_out(Tensor, Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "upsample_nearest3d_backward(Tensor, IntList, IntList) -> Tensor",
      &xla_upsample_nearest3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "sigmoid_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_sigmoid_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "sigmoid_backward(Tensor, Tensor) -> Tensor",
      &xla_sigmoid_backward);
  register_extension_backend_op(
      Backend::TPU,
      "tanh_backward_out(Tensor, Tensor, Tensor) -> Tensor",
      &xla_tanh_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "tanh_backward(Tensor, Tensor) -> Tensor",
      &xla_tanh_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_transpose2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_transpose2d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_transpose3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_transpose3d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_transpose3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_transpose3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> Tensor",
      &xla_thnn_conv2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> Tensor",
      &xla_thnn_conv2d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_depthwise2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_depthwise2d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d_forward_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_depthwise2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_depthwise2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor>",
      &xla_thnn_conv_depthwise2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_depthwise2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, std::array<bool,2>) -> std::tuple<Tensor,Tensor>",
      &xla_thnn_conv_depthwise2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> Tensor",
      &xla_thnn_conv3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> Tensor",
      &xla_thnn_conv3d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_dilated2d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_dilated2d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated2d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated2d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated2d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated2d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated2d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d_out(Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_dilated3d_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_conv_dilated3d);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d_forward_out(Tensor, Tensor, Tensor, Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated3d_forward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d_forward(Tensor, Tensor, IntList, Tensor, IntList, IntList, IntList) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated3d_forward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d_backward_out(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated3d_backward_out);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_conv_dilated3d_backward(Tensor, Tensor, Tensor, IntList, IntList, IntList, IntList, Tensor, Tensor, std::array<bool,3>) -> std::tuple<Tensor,Tensor,Tensor>",
      &xla_thnn_conv_dilated3d_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_col2im(Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_col2im);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_col2im_backward(Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_col2im_backward);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_im2col(Tensor, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_im2col);
  register_extension_backend_op(
      Backend::TPU,
      "thnn_im2col_backward(Tensor, IntList, IntList, IntList, IntList, IntList) -> Tensor",
      &xla_thnn_im2col_backward);
} |
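// --- Usage sketch (editorial annotation, not part of the generated file) ---
// A minimal, hypothetical example of adding one more registration by hand,
// reusing the exact call shape emitted above. `xla_my_relu` is an assumed
// wrapper with a C++ signature matching the schema; the schema string is
// assumed to mirror ATen's declaration for relu:
//
//   static Tensor xla_my_relu(const Tensor & self);  // hypothetical wrapper
//
//   register_extension_backend_op(
//       Backend::TPU,
//       "relu(Tensor) -> Tensor",
//       &xla_my_relu);
//
// Once registered, a call on a TPU-backed tensor would presumably be routed
// to xla_my_relu via this table (assumption: lookup is keyed on the tensor's
// Backend plus the schema string).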