// Gist dlibenzi/65d251126b9292f06b2adb93bbc5ffbe, created January 29, 2019.
// NOTE: the gist is truncated; the class declaration below ends mid-file.
// Autogenerated file by gen.py. Do not edit directly!
#include "aten_xla_bridge.h"
#include <ATen/Context.h>
#include <ATen/CPUGenerator.h>
#include <ATen/TypeDefault.h>

namespace torch_xla {

class XLATypeBase : public at::TypeDefault {
 public:
  XLATypeBase(at::TensorTypeId type_id, bool is_variable, bool is_undefined)
      : at::TypeDefault(type_id, is_variable, is_undefined) {}
  caffe2::TypeMeta typeMeta() const override {
    return scalarTypeToTypeMeta(scalarType());
  }
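  // Editorial note: backend() and getDeviceFromPtr() below report HIP, while
  // allocator() and generator() fall back to CPU. This header predates a
  // dedicated XLA backend enum in ATen, so the generator appears to reuse the
  // HIP slot as a placeholder; that reading is an assumption, not something
  // the file itself states.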
  at::Backend backend() const override {
    return at::Backend::HIP;
  }
  at::Allocator * allocator() const override {
    return at::getCPUAllocator();
  }
  c10::Device getDeviceFromPtr(void * data) const override {
    return at::DeviceType::HIP;
  }
  std::unique_ptr<at::Generator> generator() const override {
    return std::unique_ptr<at::Generator>(
        new at::CPUGenerator(&at::globalContext()));
  }
  at::TypeID ID() const override {
    return at::TypeID::Undefined;
  }
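  // Editorial note on the generated declarations that follow: `_th_*` mirrors
  // ATen's legacy TH dispatch surface, `*_out` variants write into a
  // caller-provided result tensor, trailing-underscore variants mutate `self`
  // in place, and the `s__th_*` duplicates appear to be the non-broadcasting
  // (same-size) versions. The `s__` reading is an assumption based on ATen's
  // naming convention; the file itself does not say.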
  at::Tensor & _th_set_(at::Tensor & self, at::Storage source) const override;
  at::Tensor & _th_set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntList size, at::IntList stride) const override;
  at::Tensor & _th_set_(at::Tensor & self, const at::Tensor & source) const override;
  at::Tensor & _th_set_(at::Tensor & self) const override;
  at::Tensor & _th_fill_(at::Tensor & self, at::Scalar value) const override;
  at::Tensor & _th_fill_(at::Tensor & self, const at::Tensor & value) const override;
  bool _th_is_set_to(const at::Tensor & self, const at::Tensor & tensor) const override;
  at::Tensor & _th_masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const override;
  at::Tensor & s__th_masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const override;
  at::Tensor & _th_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const override;
  at::Tensor & s__th_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const override;
  at::Tensor & _th_masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const override;
  at::Tensor & s__th_masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const override;
  at::Tensor & _th_masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const override;
  at::Tensor & s__th_masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const override;
  at::Tensor _th_masked_select(const at::Tensor & self, const at::Tensor & mask) const override;
  at::Tensor s__th_masked_select(const at::Tensor & self, const at::Tensor & mask) const override;
  at::Tensor & _th_nonzero_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_nonzero(const at::Tensor & self) const override;
  at::Tensor _th_clone(const at::Tensor & self) const override;
  at::Tensor _th_view(const at::Tensor & self, at::IntList size) const override;
  at::Tensor & _th_resize_as_(at::Tensor & self, const at::Tensor & the_template) const override;
  at::Tensor & _th_index_select_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
  at::Tensor _th_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
  at::Tensor & _th_index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const override;
  at::Tensor & _th_take_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & index) const override;
  at::Tensor _th_take(const at::Tensor & self, const at::Tensor & index) const override;
  at::Tensor & _th_put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) const override;
  at::Tensor & _th_index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const override;
  at::Tensor & _th_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const override;
  at::Tensor & _th_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) const override;
  at::Tensor & _th_unfold_out(at::Tensor & result, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const override;
  at::Tensor _th_unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const override;
  at::Tensor & _th_scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const override;
  at::Tensor & _th_scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const override;
  at::Tensor & _th_scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const override;
  at::Tensor & _th_gather_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
  at::Tensor _th_gather(const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
  bool _th_equal(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_and_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_and(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_and_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_and_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_and(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_and(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_iand_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_iand_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_iand_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_or_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_or(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_or_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_or_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_or(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_or(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ior_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ior_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ior_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_xor_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_xor(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_xor_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_xor_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_xor(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_xor(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ixor_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ixor_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ixor_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_lshift_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_lshift(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_lshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_lshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_lshift(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_lshift(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ilshift_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ilshift_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ilshift_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_rshift_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_rshift(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_rshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_rshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_rshift(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_rshift(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_irshift_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_irshift_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_irshift_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_lt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_lt(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_lt(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_lt(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_lt_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_lt_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_lt_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_gt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_gt(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_gt(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_gt(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_gt_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_gt_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_gt_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_le_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_le(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_le(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_le(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_le_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_le_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_le_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ge_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_ge(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_ge(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_ge(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ge_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ge_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ge_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_eq_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_eq(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_eq(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_eq(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_eq_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_eq_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_eq_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ne_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_ne(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_ne(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_ne(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_ne_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_ne_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_ne_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_min(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_min(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_min(const at::Tensor & self) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor,at::Tensor> _th_min(const at::Tensor & self, int64_t dim, bool keepdim) const override;
  at::Tensor & _th_max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_max(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_max(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_max(const at::Tensor & self) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_max_out(at::Tensor & max, at::Tensor & max_indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor,at::Tensor> _th_max(const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor,at::Tensor> _th_kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor,at::Tensor> _th_mode(const at::Tensor & self, int64_t dim, bool keepdim) const override;
  at::Tensor _th_median(const at::Tensor & self) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor,at::Tensor> _th_median(const at::Tensor & self, int64_t dim, bool keepdim) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool descending) const override;
  std::tuple<at::Tensor,at::Tensor> _th_sort(const at::Tensor & self, int64_t dim, bool descending) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const override;
  std::tuple<at::Tensor,at::Tensor> _th_topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const override;
  at::Tensor _th_any(const at::Tensor & self) const override;
  at::Tensor & _th_any_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const override;
  at::Tensor _th_any(const at::Tensor & self, int64_t dim, bool keepdim) const override;
  at::Tensor & _th_abs_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_abs(const at::Tensor & self) const override;
  at::Tensor & _th_sigmoid_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_sigmoid(const at::Tensor & self) const override;
  at::Tensor & _th_log_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_log(const at::Tensor & self) const override;
  at::Tensor & _th_log10_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_log10(const at::Tensor & self) const override;
  at::Tensor & _th_log1p_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_log1p(const at::Tensor & self) const override;
  at::Tensor & _th_log2_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_log2(const at::Tensor & self) const override;
  at::Tensor & _th_lgamma_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_lgamma(const at::Tensor & self) const override;
  at::Tensor & _th_lgamma_(at::Tensor & self) const override;
  at::Tensor & _th_digamma_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_digamma(const at::Tensor & self) const override;
  at::Tensor & _th_digamma_(at::Tensor & self) const override;
  at::Tensor & _th_polygamma_out(at::Tensor & result, int64_t n, const at::Tensor & self) const override;
  at::Tensor _th_polygamma(int64_t n, const at::Tensor & self) const override;
  at::Tensor & _th_polygamma_(at::Tensor & self, int64_t n) const override;
  at::Tensor & _th_exp_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_exp(const at::Tensor & self) const override;
  at::Tensor & _th_expm1_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_expm1(const at::Tensor & self) const override;
  at::Tensor & _th_cos_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_cos(const at::Tensor & self) const override;
  at::Tensor & _th_acos_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_acos(const at::Tensor & self) const override;
  at::Tensor & _th_cosh_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_cosh(const at::Tensor & self) const override;
  at::Tensor & _th_sin_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_sin(const at::Tensor & self) const override;
  at::Tensor & _th_asin_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_asin(const at::Tensor & self) const override;
  at::Tensor & _th_sinh_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_sinh(const at::Tensor & self) const override;
  at::Tensor & _th_tan_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_tan(const at::Tensor & self) const override;
  at::Tensor & _th_atan_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_atan(const at::Tensor & self) const override;
  at::Tensor & _th_tanh_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_tanh(const at::Tensor & self) const override;
  at::Tensor & _th_erf_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_erf(const at::Tensor & self) const override;
  at::Tensor & _th_erfc_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_erfc(const at::Tensor & self) const override;
  at::Tensor & _th_erfinv_(at::Tensor & self) const override;
  at::Tensor & _th_erfinv_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_erfinv(const at::Tensor & self) const override;
  at::Tensor & _th_sqrt_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_sqrt(const at::Tensor & self) const override;
  at::Tensor & _th_rsqrt_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_rsqrt(const at::Tensor & self) const override;
  at::Tensor & _th_ceil_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_ceil(const at::Tensor & self) const override;
  at::Tensor & _th_floor_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_floor(const at::Tensor & self) const override;
  at::Tensor & _th_round_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_round(const at::Tensor & self) const override;
  at::Tensor & _th_trunc_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_trunc(const at::Tensor & self) const override;
  at::Tensor & _th_frac_(at::Tensor & self) const override;
  at::Tensor & _th_frac_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_frac(const at::Tensor & self) const override;
  at::Tensor & _th_var_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const override;
  at::Tensor _th_var(const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const override;
  at::Tensor _th_var(const at::Tensor & self, bool unbiased) const override;
  at::Tensor & _th_std_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const override;
  at::Tensor _th_std(const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const override;
  at::Tensor _th_std(const at::Tensor & self, bool unbiased) const override;
  at::Tensor & _th_renorm_out(at::Tensor & result, const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
  at::Tensor _th_renorm(const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
  at::Tensor & _th_renorm_(at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
  at::Tensor _th_dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const override;
  at::Tensor s__th_dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const override;
  at::Tensor & _th_reciprocal_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_reciprocal(const at::Tensor & self) const override;
  at::Tensor & _th_reciprocal_(at::Tensor & self) const override;
  at::Tensor & _th_neg_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_neg(const at::Tensor & self) const override;
  at::Tensor & _th_neg_(at::Tensor & self) const override;
  at::Tensor & _th_atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_atan2(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_atan2(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_atan2_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_atan2_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const override;
  at::Tensor _th_pow(const at::Tensor & self, at::Scalar exponent) const override;
  at::Tensor & _th_pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor & s__th_pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor _th_pow(const at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor s__th_pow(const at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor & _th_pow_out(at::Tensor & result, at::Scalar self, const at::Tensor & exponent) const override;
  at::Tensor _th_pow(at::Scalar self, const at::Tensor & exponent) const override;
  at::Tensor & _th_pow_(at::Tensor & self, at::Scalar exponent) const override;
  at::Tensor & _th_pow_(at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor & s__th_pow_(at::Tensor & self, const at::Tensor & exponent) const override;
  at::Tensor & _th_lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor & s__th_lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor _th_lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor s__th_lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor & _th_lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor & s__th_lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
  at::Tensor & _th_histc_out(at::Tensor & result, const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const override;
  at::Tensor _th_histc(const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const override;
  at::Tensor & _th_zero_(at::Tensor & self) const override;
  at::Tensor & _th_cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const override;
  at::Tensor _th_cumsum(const at::Tensor & self, int64_t dim) const override;
  at::Tensor & _th_cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const override;
  at::Tensor _th_cumprod(const at::Tensor & self, int64_t dim) const override;
  at::Tensor & _th_sign_out(at::Tensor & result, const at::Tensor & self) const override;
  at::Tensor _th_sign(const at::Tensor & self) const override;
  at::Tensor & _th_sign_(at::Tensor & self) const override;
  at::Tensor _th_trace(const at::Tensor & self) const override;
  at::Tensor & _th_fmod_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_fmod(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_fmod(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_fmod(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_fmod_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_fmod_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_fmod_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_remainder_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
  at::Tensor _th_remainder(const at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor _th_remainder(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor s__th_remainder(const at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_remainder_(at::Tensor & self, at::Scalar other) const override;
  at::Tensor & _th_remainder_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & s__th_remainder_(at::Tensor & self, const at::Tensor & other) const override;
  at::Tensor & _th_clamp_out(at::Tensor & result, const at::Tensor & self, at::Scalar min, at::Scalar max) const override;
  at::Tensor _th_clamp(const at::Tensor & self, at::Scalar min, at::Scalar max) const override;
  at::Tensor & _th_clamp_min_out(at::Tensor & result, const at::Tensor & self, at::Scalar min) const override;
  at::Tensor _th_clamp_min(const at::Tensor & self, at::Scalar min) const override;
  at::Tensor & _th_clamp_max_out(at::Tensor & result, const at::Tensor & self, at::Scalar max) const override;
  at::Tensor _th_clamp_max(const at::Tensor & self, at::Scalar max) const override;
  at::Tensor _th_dot(const at::Tensor & self, const at::Tensor & tensor) const override;
  at::Tensor & _th_cross_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, int64_t dim) const override;
  at::Tensor _th_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) const override;
  at::Tensor & _th_diag_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const override;
  at::Tensor _th_diag(const at::Tensor & self, int64_t diagonal) const override;
  at::Tensor & _th_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & s__th_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor _th_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor s__th_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & s__th_addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor _th_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor s__th_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & s__th_addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor _th_addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor s__th_addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_ger_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec2) const override;
  at::Tensor _th_ger(const at::Tensor & self, const at::Tensor & vec2) const override;
  at::Tensor & _th_mv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec) const override;
  at::Tensor _th_mv(const at::Tensor & self, const at::Tensor & vec) const override;
  at::Tensor & _th_mm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const override;
  at::Tensor _th_mm(const at::Tensor & self, const at::Tensor & mat2) const override;
  at::Tensor & _th_bmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const override;
  at::Tensor _th_bmm(const at::Tensor & self, const at::Tensor & mat2) const override;
  at::Tensor & _th_addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & s__th_addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor _th_addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor s__th_addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & s__th_baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor _th_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor s__th_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
  at::Tensor & _th_addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & s__th_addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor _th_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor s__th_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & _th_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & s__th_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & _th_addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & s__th_addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor _th_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor s__th_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & _th_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  at::Tensor & s__th_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_gels_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, const at::Tensor & A) const override;
  std::tuple<at::Tensor,at::Tensor> _th_gels(const at::Tensor & self, const at::Tensor & A) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_trtrs_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const override;
  std::tuple<at::Tensor,at::Tensor> _th_trtrs(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_symeig_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool eigenvectors, bool upper) const override;
  std::tuple<at::Tensor,at::Tensor> _th_symeig(const at::Tensor & self, bool eigenvectors, bool upper) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_eig_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool eigenvectors) const override;
  std::tuple<at::Tensor,at::Tensor> _th_eig(const at::Tensor & self, bool eigenvectors) const override;
  std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _th_svd_out(at::Tensor & res1, at::Tensor & res2, at::Tensor & res3, const at::Tensor & self, bool some, bool compute_uv) const override;
  std::tuple<at::Tensor,at::Tensor,at::Tensor> _th_svd(const at::Tensor & self, bool some, bool compute_uv) const override;
  at::Tensor & _th_getri_single_out(at::Tensor & output, const at::Tensor & self) const override;
  at::Tensor _th_getri_single(const at::Tensor & self) const override;
  at::Tensor & _th_potri_out(at::Tensor & output, const at::Tensor & self, bool upper) const override;
  at::Tensor _th_potri(const at::Tensor & self, bool upper) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_pstrf_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool upper, at::Scalar tol) const override;
  std::tuple<at::Tensor,at::Tensor> _th_pstrf(const at::Tensor & self, bool upper, at::Scalar tol) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_qr_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self) const override;
  std::tuple<at::Tensor,at::Tensor> _th_qr(const at::Tensor & self) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_geqrf_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self) const override;
  std::tuple<at::Tensor,at::Tensor> _th_geqrf(const at::Tensor & self) const override;
  at::Tensor & _th_orgqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2) const override;
  at::Tensor _th_orgqr(const at::Tensor & self, const at::Tensor & input2) const override;
  at::Tensor & _th_ormqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const override;
  at::Tensor _th_ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const override;
  std::tuple<at::Tensor &,at::Tensor &> _th_btrifact_out(at::Tensor & result, at::Tensor & pivots, const at::Tensor & self, bool pivot) const override;
  std::tuple<at::Tensor,at::Tensor> _th_btrifact(const at::Tensor & self, bool pivot) const override;
  std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _th_btrifact_with_info_out(at::Tensor & result, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool pivot) const override;
  std::tuple<at::Tensor,at::Tensor,at::Tensor> _th_btrifact_with_info(const at::Tensor & self, bool pivot) const override;
  at::Tensor & _th_btrisolve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const override;
  at::Tensor _th_btrisolve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const override;
  at::Tensor & _th_random_(at::Tensor & self, int64_t from, int64_t to, at::Generator * generator) const override;
  at::Tensor & _th_random_(at::Tensor & self, int64_t to, at::Generator * generator) const override;
  at::Tensor & _th_random_(at::Tensor & self, at::Generator * generator) const override;
  at::Tensor & _th_multinomial_out(at::Tensor & result, const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const override;
  at::Tensor _th_multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const override;
  at::Tensor & _th_uniform_(at::Tensor & self, double from, double to, at::Generator * generator) const override;
  at::Tensor & _th_normal_out(at::Tensor & output, const at::Tensor & mean, double std, at::Generator * generator) const override;
  at::Tensor _th_normal(const at::Tensor & mean, double std, at::Generator * generator) const override;
  at::Tensor & _th_normal_out(at::Tensor & output, double mean, const at::Tensor & std, at::Generator * generator) const override;
  at::Tensor _th_normal(double mean, const at::Tensor & std, at::Generator * generator) const override;
  at::Tensor & _th_normal_out(at::Tensor & output, const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const override;
  at::Tensor _th_normal(const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const override;
  at::Tensor & _th_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const override;
  at::Tensor & _th_cauchy_(at::Tensor & self, double median, double sigma, at::Generator * generator) const override;
  at::Tensor & _th_log_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const override;
  at::Tensor & _th_exponential_(at::Tensor & self, double lambd, at::Generator * generator) const override;
  at::Tensor & _th_geometric_(at::Tensor & self, double p, at::Generator * generator) const override;
  at::Tensor & _th_dirichlet_grad_out(at::Tensor & output, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const override;
  at::Tensor _th_dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const override;
  at::Tensor _th_alias(const at::Tensor & self) const override;
  at::Tensor & _th_copy_ignoring_overlaps_(at::Tensor & self, const at::Tensor & src) const override;
  at::Tensor & _th_cat_out(at::Tensor & self, at::TensorList tensors, int64_t dim) const override;
  at::Tensor _th_cat(at::TensorList tensors, int64_t dim) const override;
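  // Editorial note: from here the generated declarations switch from legacy
  // TH tensor ops to THNN neural-network kernels, which come in
  // forward/backward pairs (plus `_out` variants) so autograd can dispatch
  // each half separately.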
at::Tensor & _thnn_binary_cross_entropy_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor _thnn_binary_cross_entropy_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & _thnn_binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor _thnn_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & _thnn_l1_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_l1_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_mse_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_mse_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_multi_margin_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor _thnn_multi_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & _thnn_multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor _thnn_multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
std::tuple<at::Tensor &,at::Tensor &> _thnn_multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
std::tuple<at::Tensor,at::Tensor> _thnn_multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const override; | |
at::Tensor _thnn_multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const override; | |
std::tuple<at::Tensor &,at::Tensor &> _thnn_nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor,at::Tensor> _thnn_nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor & _thnn_nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor _thnn_nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
std::tuple<at::Tensor &,at::Tensor &> _thnn_nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor,at::Tensor> _thnn_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor & _thnn_nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor _thnn_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor & _thnn_smooth_l1_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_smooth_l1_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_soft_margin_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_soft_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor _thnn_soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & _thnn_elu_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor _thnn_elu_forward(const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor & _thnn_elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const override; | |
at::Tensor _thnn_elu_backward(const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const override; | |
at::Tensor & _thnn_elu_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor & _thnn_elu_forward_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor & _thnn_glu_forward_out(at::Tensor & output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor _thnn_glu_forward(const at::Tensor & self, int64_t dim) const override; | |
at::Tensor & _thnn_glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor _thnn_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor & _thnn_hardtanh_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor _thnn_hardtanh_forward(const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor & _thnn_hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override;
at::Tensor _thnn_hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override;
at::Tensor & _thnn_hardtanh_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override;
at::Tensor & _thnn_hardtanh_forward_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override;
at::Tensor & _thnn_leaky_relu_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar negative_slope) const override;
at::Tensor _thnn_leaky_relu_forward(const at::Tensor & self, at::Scalar negative_slope) const override;
at::Tensor & _thnn_leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const override;
at::Tensor _thnn_leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const override;
at::Tensor & _thnn_leaky_relu_(at::Tensor & self, at::Scalar negative_slope) const override;
at::Tensor & _thnn_leaky_relu_forward_(at::Tensor & self, at::Scalar negative_slope) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_log_sigmoid_forward(const at::Tensor & self) const override;
at::Tensor & _thnn_log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const override;
at::Tensor _thnn_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const override;
at::Tensor & _thnn_rrelu_with_noise_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor _thnn_rrelu_with_noise_forward(const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor & _thnn_rrelu_with_noise_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const override;
at::Tensor _thnn_rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const override;
at::Tensor & _thnn_rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor & _thnn_rrelu_with_noise_forward_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor & _thnn_softplus_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const override;
at::Tensor _thnn_softplus_forward(const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const override;
at::Tensor & _thnn_softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const override;
at::Tensor _thnn_softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const override;
at::Tensor & _thnn_softshrink_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar lambd) const override;
at::Tensor _thnn_softshrink_forward(const at::Tensor & self, at::Scalar lambd) const override;
at::Tensor & _thnn_softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const override;
at::Tensor _thnn_softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const override;
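// TH NN pooling ops: adaptive/average/max pooling and max unpooling.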
at::Tensor & _thnn_adaptive_avg_pool3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override;
at::Tensor _thnn_adaptive_avg_pool3d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const override;
at::Tensor _thnn_adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_adaptive_max_pool2d_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_adaptive_max_pool2d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override;
at::Tensor _thnn_adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_adaptive_max_pool3d_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_adaptive_max_pool3d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override;
at::Tensor _thnn_adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override;
at::Tensor & _thnn_avg_pool2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor _thnn_avg_pool2d_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor & _thnn_avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor _thnn_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor & _thnn_avg_pool3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor _thnn_avg_pool3d_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor & _thnn_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor _thnn_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_max_pool2d_with_indices_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_max_pool2d_with_indices_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor & _thnn_max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override;
at::Tensor _thnn_max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_max_pool3d_with_indices_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_max_pool3d_with_indices_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor & _thnn_max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override;
at::Tensor _thnn_max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override;
at::Tensor & _thnn_max_unpool2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override;
at::Tensor _thnn_max_unpool2d_forward(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override;
at::Tensor & _thnn_max_unpool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override;
at::Tensor _thnn_max_unpool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override;
at::Tensor & _thnn_max_unpool3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override;
at::Tensor _thnn_max_unpool3d_forward(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override;
at::Tensor & _thnn_max_unpool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override;
at::Tensor _thnn_max_unpool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override;
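// TH NN upsampling ops: linear, bilinear, bicubic, trilinear, and nearest.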
at::Tensor & _thnn_upsample_linear1d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor _thnn_upsample_linear1d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor _thnn_upsample_linear1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_bilinear2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor _thnn_upsample_bilinear2d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor _thnn_upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_bicubic2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor _thnn_upsample_bicubic2d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor _thnn_upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_trilinear3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor _thnn_upsample_trilinear3d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor _thnn_upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override;
at::Tensor & _thnn_upsample_nearest1d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override;
at::Tensor _thnn_upsample_nearest1d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
at::Tensor _thnn_upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
at::Tensor & _thnn_upsample_nearest2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override;
at::Tensor _thnn_upsample_nearest2d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
at::Tensor _thnn_upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
at::Tensor & _thnn_upsample_nearest3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override;
at::Tensor _thnn_upsample_nearest3d_forward(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor & _thnn_upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
at::Tensor _thnn_upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override;
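// TH NN pointwise sigmoid/tanh forward and backward.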
at::Tensor & _thnn_sigmoid_forward_out(at::Tensor & output, const at::Tensor & self) const override;
at::Tensor _thnn_sigmoid_forward(const at::Tensor & self) const override;
at::Tensor & _thnn_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const override;
at::Tensor _thnn_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) const override;
at::Tensor & _thnn_tanh_forward_out(at::Tensor & output, const at::Tensor & self) const override;
at::Tensor _thnn_tanh_forward(const at::Tensor & self) const override;
at::Tensor & _thnn_tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const override;
at::Tensor _thnn_tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) const override;
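// TH NN convolution ops: transposed, plain 2d/3d, depthwise, and dilated.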
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_transpose2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_transpose2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_transpose2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_transpose2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_transpose3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_transpose3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_transpose3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_transpose3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv2d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
at::Tensor & _thnn_conv_depthwise2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
at::Tensor _thnn_conv_depthwise2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor &,at::Tensor &> _thnn_conv_depthwise2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_conv_depthwise2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, std::array<bool,2> output_mask) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_dilated2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_dilated2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_dilated2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_dilated2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_dilated3d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_dilated3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_conv_dilated3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_conv_dilated3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override;
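// TH NN im2col/col2im (unfold/fold) ops.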
at::Tensor & _thnn_col2im_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor _thnn_col2im_forward(const at::Tensor & self, at::IntList output_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor & _thnn_col2im_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor _thnn_col2im_backward(const at::Tensor & grad_output, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor & _thnn_im2col_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor _thnn_im2col_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor & _thnn_im2col_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList input_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
at::Tensor _thnn_im2col_backward(const at::Tensor & grad_output, at::IntList input_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override;
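// End of the legacy _thnn_* declarations. The overrides below cover the
// general ATen operator surface, starting with the _cast_* conversions.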
at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking) const override;
at::Tensor _cast_Half(const at::Tensor & self, bool non_blocking) const override;
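// Fused dropout and masked-scale kernels, shape helpers, and the dropout,
// feature-dropout, and alpha-dropout variants (each with an in-place form).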
std::tuple<at::Tensor,at::Tensor> _fused_dropout(const at::Tensor & self, double p, at::Generator * generator) const override;
at::Tensor _masked_scale(const at::Tensor & self, const at::Tensor & mask, double scale) const override;
at::Tensor _reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) const override;
at::Tensor _shape_as_tensor(const at::Tensor & self) const override;
at::Tensor dropout(const at::Tensor & input, double p, bool train) const override;
at::Tensor & dropout_(at::Tensor & self, double p, bool train) const override;
at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) const override;
at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) const override;
at::Tensor alpha_dropout(const at::Tensor & input, double p, bool train) const override;
at::Tensor & alpha_dropout_(at::Tensor & self, double p, bool train) const override;
at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train) const override;
at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train) const override;
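// The remaining overrides follow the generated declaration order
// (roughly alphabetical by op name).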
at::Tensor abs(const at::Tensor & self) const override;
at::Tensor & abs_(at::Tensor & self) const override;
at::Tensor & abs_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor acos(const at::Tensor & self) const override;
at::Tensor & acos_(at::Tensor & self) const override;
at::Tensor & acos_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor avg_pool1d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override;
at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntList output_size) const override;
std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntList output_size) const override;
at::Tensor add(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor & add_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor add(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const override;
at::Tensor & add_(at::Tensor & self, at::Scalar other, at::Scalar alpha) const override;
at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntList size) const override;
at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntList size) const override;
at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor & all_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const override;
bool allclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) const override;
at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor & any_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor & arange_out(at::Tensor & result, at::Scalar end) const override;
at::Tensor & arange_out(at::Tensor & result, at::Scalar start, at::Scalar end, at::Scalar step) const override;
at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) const override;
at::Tensor argmax(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor argmax(const at::Tensor & self) const override;
at::Tensor _argmax(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor argmin(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor argmin(const at::Tensor & self) const override;
at::Tensor _argmin(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor as_strided(const at::Tensor & self, at::IntList size, at::IntList stride, c10::optional<int64_t> storage_offset) const override;
at::Tensor & as_strided_(at::Tensor & self, at::IntList size, at::IntList stride, c10::optional<int64_t> storage_offset) const override;
at::Tensor asin(const at::Tensor & self) const override;
at::Tensor & asin_(at::Tensor & self) const override;
at::Tensor & asin_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor atan(const at::Tensor & self) const override;
at::Tensor & atan_(at::Tensor & self) const override;
at::Tensor & atan_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & _baddbmm_mkl_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) const override;
at::Tensor bernoulli(const at::Tensor & self, at::Generator * generator) const override;
at::Tensor & bernoulli_out(at::Tensor & result, const at::Tensor & self, at::Generator * generator) const override;
at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, at::Generator * generator) const override;
at::Tensor & bernoulli_(at::Tensor & self, double p, at::Generator * generator) const override;
at::Tensor bernoulli(const at::Tensor & self, double p, at::Generator * generator) const override;
at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const at::Tensor & bias) const override;
at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, const at::Tensor & pos_weight, int64_t reduction) const override;
at::Tensor binary_cross_entropy_with_logits_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, const at::Tensor & pos_weight, int64_t reduction) const override;
at::Tensor bincount(const at::Tensor & self, const at::Tensor & weights, int64_t minlength) const override;
at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) const override;
at::Tensor & bmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const override;
std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors) const override;
at::Tensor cat(at::TensorList tensors, int64_t dim) const override;
at::Tensor & cat_out(at::Tensor & result, at::TensorList tensors, int64_t dim) const override;
at::Tensor ceil(const at::Tensor & self) const override;
at::Tensor & ceil_(at::Tensor & self) const override;
at::Tensor & ceil_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor chain_matmul(at::TensorList matrices) const override;
std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim) const override;
at::Tensor clamp(const at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const override;
at::Tensor & clamp_(at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const override;
at::Tensor & clamp_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const override;
at::Tensor clamp_max(const at::Tensor & self, at::Scalar max) const override;
at::Tensor & clamp_max_(at::Tensor & self, at::Scalar max) const override;
at::Tensor & clamp_max_out(at::Tensor & result, const at::Tensor & self, at::Scalar max) const override;
at::Tensor clamp_min(const at::Tensor & self, at::Scalar min) const override;
at::Tensor & clamp_min_(at::Tensor & self, at::Scalar min) const override;
at::Tensor & clamp_min_out(at::Tensor & result, const at::Tensor & self, at::Scalar min) const override;
at::Tensor constant_pad_nd(const at::Tensor & self, at::IntList pad, at::Scalar value) const override;
at::Tensor contiguous(const at::Tensor & self) const override;
at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups) const override;
at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) const override;
at::Tensor _convolution_nogroup(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const at::Tensor & ggI, const at::Tensor & ggW, const at::Tensor & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) const override;
at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const override;
at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const override;
at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const override;
at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) const override;
at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const override;
at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const override;
at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const override;
at::Tensor & s_copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) const override;
at::Tensor _s_copy_from(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) const override;
void _copy_same_type_(at::Tensor & self, const at::Tensor & src) const override;
at::Tensor cos(const at::Tensor & self) const override;
at::Tensor & cos_(at::Tensor & self) const override;
at::Tensor & cos_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor cosh(const at::Tensor & self) const override;
at::Tensor & cosh_(at::Tensor & self) const override;
at::Tensor & cosh_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) const override;
at::Tensor cumsum(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor cumsum(const at::Tensor & self, int64_t dim) const override;
at::Tensor & cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor & cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const override;
at::Tensor cumprod(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor cumprod(const at::Tensor & self, int64_t dim) const override;
at::Tensor & cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor & cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const override;
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, int64_t blank, int64_t reduction) const override;
at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction) const override;
std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, int64_t blank) const override;
at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank) const override;
at::Tensor det(const at::Tensor & self) const override;
at::Tensor diag_embed(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) const override;
at::Tensor diagflat(const at::Tensor & self, int64_t offset) const override;
at::Tensor diagonal(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) const override;
at::Tensor div(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & div_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & div_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor div(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & div_(at::Tensor & self, at::Scalar other) const override;
at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor) const override;
at::Tensor & dot_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor) const override;
at::Tensor einsum(std::string equation, at::TensorList tensors) const override;
at::Tensor embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const override;
at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const override;
at::Tensor embedding_dense_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const override;
at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) const override;
at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) const override;
at::Tensor _embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) const override;
at::Tensor empty(at::IntList size, const at::TensorOptions & options) const override;
at::Tensor & resize_(at::Tensor & self, at::IntList size) const override;
at::Tensor & empty_out(at::Tensor & result, at::IntList size) const override;
at::Tensor empty_like(const at::Tensor & self) const override;
at::Tensor empty_strided(at::IntList size, at::IntList stride, const at::TensorOptions & options) const override;
at::Tensor erf(const at::Tensor & self) const override;
at::Tensor & erf_(at::Tensor & self) const override;
at::Tensor & erf_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor erfc(const at::Tensor & self) const override;
at::Tensor & erfc_(at::Tensor & self) const override;
at::Tensor & erfc_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor exp(const at::Tensor & self) const override;
at::Tensor & exp_(at::Tensor & self) const override;
at::Tensor & exp_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor expm1(const at::Tensor & self) const override;
at::Tensor & expm1_(at::Tensor & self) const override;
at::Tensor & expm1_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor expand(const at::Tensor & self, at::IntList size, bool implicit) const override;
at::Tensor expand_as(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & eye_out(at::Tensor & result, int64_t n) const override;
at::Tensor & eye_out(at::Tensor & result, int64_t n, int64_t m) const override;
at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) const override;
at::Tensor & fill_(at::Tensor & self, at::Scalar value) const override;
at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) const override;
at::Tensor floor(const at::Tensor & self) const override;
at::Tensor & floor_(at::Tensor & self) const override;
at::Tensor & floor_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor & full_out(at::Tensor & result, at::IntList size, at::Scalar fill_value) const override;
at::Tensor full_like(const at::Tensor & self, at::Scalar fill_value) const override;
at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) const override;
at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) const override;
at::Tensor & ger_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec2) const override;
std::tuple<at::Tensor,at::Tensor> gesv(const at::Tensor & self, const at::Tensor & A) const override;
std::tuple<at::Tensor &,at::Tensor &> gesv_out(at::Tensor & solution, at::Tensor & lu, const at::Tensor & self, const at::Tensor & A) const override;
std::tuple<at::Tensor,at::Tensor> _gesv_helper(const at::Tensor & self, const at::Tensor & A) const override;
at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const at::Tensor & weight, const at::Tensor & bias, double eps, bool cudnn_enabled) const override;
at::Tensor fft(const at::Tensor & self, int64_t signal_ndim, bool normalized) const override;
at::Tensor ifft(const at::Tensor & self, int64_t signal_ndim, bool normalized) const override;
at::Tensor rfft(const at::Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) const override;
at::Tensor irfft(const at::Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, at::IntList signal_sizes) const override;
at::Tensor _fft_with_size(const at::Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, at::IntList checked_signal_sizes, bool normalized, bool onesided, at::IntList output_sizes) const override;
void _cufft_set_plan_cache_max_size(int64_t max_size) const override;
at::Tensor index(const at::Tensor & self, at::TensorList indices) const override;
at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const override;
at::Tensor index_put(const at::Tensor & self, at::TensorList indices, const at::Tensor & values, bool accumulate) const override;
at::Tensor & index_put_(at::Tensor & self, at::TensorList indices, const at::Tensor & values, bool accumulate) const override;
at::Tensor instance_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) const override;
at::Tensor inverse(const at::Tensor & self) const override;
at::Tensor & inverse_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor _inverse_helper(const at::Tensor & self) const override;
at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) const override;
at::Tensor isnan(const at::Tensor & self) const override;
bool is_distributed(const at::Tensor & self) const override;
bool is_floating_point(const at::Tensor & self) const override;
bool is_complex(const at::Tensor & self) const override;
bool is_nonzero(const at::Tensor & self) const override;
bool is_same_size(const at::Tensor & self, const at::Tensor & other) const override;
bool is_signed(const at::Tensor & self) const override;
at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override;
at::Tensor kl_div_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override;
std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const override;
at::Tensor layer_norm(const at::Tensor & input, at::IntList normalized_shape, const at::Tensor & weight, const at::Tensor & bias, double eps, bool cudnn_enable) const override;
at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias) const override;
at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, at::Scalar weight_scale, at::Scalar weight_zero_point, const at::Tensor & bias) const override;
std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight(const at::Tensor & input) const override;
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) const override;
at::Tensor & linspace_out(at::Tensor & result, at::Scalar start, at::Scalar end, int64_t steps) const override;
at::Tensor log(const at::Tensor & self) const override;
at::Tensor & log_(at::Tensor & self) const override;
at::Tensor & log_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor log10(const at::Tensor & self) const override;
at::Tensor & log10_(at::Tensor & self) const override;
at::Tensor & log10_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor log1p(const at::Tensor & self) const override;
at::Tensor & log1p_(at::Tensor & self) const override;
at::Tensor & log1p_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor log2(const at::Tensor & self) const override;
at::Tensor & log2_(at::Tensor & self) const override;
at::Tensor & log2_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor logdet(const at::Tensor & self) const override;
at::Tensor & logspace_out(at::Tensor & result, at::Scalar start, at::Scalar end, int64_t steps) const override;
at::Tensor log_softmax(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor log_softmax(const at::Tensor & self, int64_t dim) const override;
at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) const override;
at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) const override;
at::Tensor logsumexp(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor & logsumexp_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) const override;
at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & matmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor matrix_rank(const at::Tensor & self, double tol, bool symmetric) const override;
at::Tensor matrix_rank(const at::Tensor & self, bool symmetric) const override;
at::Tensor matrix_power(const at::Tensor & self, int64_t n) const override;
std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor max_values(const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor max_pool1d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor max_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor max_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override;
at::Tensor mean(const at::Tensor & self, at::ScalarType dtype) const override;
at::Tensor mean(const at::Tensor & self) const override;
at::Tensor mean(const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor mean(const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor mean(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const override;
at::Tensor & mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor & mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor & mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const override;
std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor min_values(const at::Tensor & self, int64_t dim, bool keepdim) const override;
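// Backend-specific entry points (MKL-DNN convolution, MIOpen batch norm
// and convolution) that the dispatcher routes through this type as well.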
at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups) const override; | |
at::Tensor mkldnn_convolution_backward_input(at::IntList self_size, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool bias_defined) const override; | |
std::tuple<at::Tensor,at::Tensor> mkldnn_convolution_backward_weights(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool bias_defined) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, std::array<bool,3> output_mask) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double exponential_average_factor, double epsilon) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const at::Tensor & running_mean, const at::Tensor & running_var, const at::Tensor & save_mean, const at::Tensor & save_var, double epsilon) const override;
at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
at::Tensor miopen_convolution_backward_input(at::IntList self_size, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const override;
at::Tensor miopen_convolution_backward_bias(const at::Tensor & grad_output) const override;
at::Tensor miopen_convolution_backward_weight(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList output_padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList output_padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const override;
at::Tensor miopen_convolution_transpose_backward_input(const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
at::Tensor miopen_convolution_transpose_backward_weight(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const override;
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) const override;
at::Tensor & mm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const override;
at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) const override;
std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, int64_t dim, bool keepdim) const override;
std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & mul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor mul(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & mul_(at::Tensor & self, at::Scalar other) const override;
at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) const override;
at::Tensor & mv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec) const override;
at::Tensor mvlgamma(const at::Tensor & self, int64_t p) const override;
at::Tensor & mvlgamma_(at::Tensor & self, int64_t p) const override;
at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) const override;
at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & running_mean, const at::Tensor & running_var, const at::Tensor & save_mean, const at::Tensor & save_invstd, bool train, double eps, std::array<bool,3> output_mask) const override;
std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(const at::Tensor & input, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum) const override;
at::Tensor & ones_out(at::Tensor & result, at::IntList size) const override;
at::Tensor ones_like(const at::Tensor & self) const override;
at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) const override;
at::Tensor pdist(const at::Tensor & self, double p) const override;
at::Tensor _pdist_forward(const at::Tensor & self, double p) const override;
at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) const override;
at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) const override;
at::Tensor permute(const at::Tensor & self, at::IntList dims) const override;
at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) const override;
at::Tensor pin_memory(const at::Tensor & self) const override;
at::Tensor pinverse(const at::Tensor & self, double rcond) const override;
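// Out-variants of the random-tensor factories (rand/randint/randn/randperm).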
at::Tensor & rand_out(at::Tensor & result, at::IntList size) const override;
at::Tensor & rand_out(at::Tensor & result, at::IntList size, at::Generator * generator) const override;
at::Tensor rand_like(const at::Tensor & self) const override;
at::Tensor & randint_out(at::Tensor & result, int64_t high, at::IntList size) const override;
at::Tensor & randint_out(at::Tensor & result, int64_t high, at::IntList size, at::Generator * generator) const override;
at::Tensor & randint_out(at::Tensor & result, int64_t low, int64_t high, at::IntList size) const override;
at::Tensor & randint_out(at::Tensor & result, int64_t low, int64_t high, at::IntList size, at::Generator * generator) const override;
at::Tensor randint_like(const at::Tensor & self, int64_t high) const override;
at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high) const override;
at::Tensor & randn_out(at::Tensor & result, at::IntList size) const override;
at::Tensor & randn_out(at::Tensor & result, at::IntList size, at::Generator * generator) const override;
at::Tensor randn_like(const at::Tensor & self) const override;
at::Tensor & randperm_out(at::Tensor & result, int64_t n) const override;
at::Tensor & randperm_out(at::Tensor & result, int64_t n, at::Generator * generator) const override;
at::Tensor & range_out(at::Tensor & result, at::Scalar start, at::Scalar end, at::Scalar step) const override;
at::Tensor repeat(const at::Tensor & self, at::IntList repeats) const override;
at::Tensor reshape(const at::Tensor & self, at::IntList shape) const override;
at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other) const override;
std::tuple<at::Tensor,at::Tensor> RoiPooling2d_forward(const at::Tensor & input, const at::Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale) const override;
at::Tensor RoiPooling2d_backward(const at::Tensor & input, const at::Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const at::Tensor & gradOutput, const at::Tensor & argmaxes) const override;
at::Tensor round(const at::Tensor & self) const override;
at::Tensor & round_(at::Tensor & self) const override;
at::Tensor & round_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor rrelu(const at::Tensor & self, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor & rrelu_(at::Tensor & self, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override;
at::Tensor relu(const at::Tensor & self) const override;
at::Tensor & relu_(at::Tensor & self) const override;
at::Tensor prelu(const at::Tensor & self, const at::Tensor & weight) const override;
std::tuple<at::Tensor,at::Tensor> prelu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) const override;
at::Tensor hardshrink(const at::Tensor & self, at::Scalar lambd) const override;
at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, at::Scalar lambd) const override;
at::Tensor rsqrt(const at::Tensor & self) const override;
at::Tensor & rsqrt_(at::Tensor & self) const override;
at::Tensor & rsqrt_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) const override;
at::Tensor selu(const at::Tensor & self) const override;
at::Tensor & selu_(at::Tensor & self) const override;
at::Tensor celu(const at::Tensor & self, at::Scalar alpha) const override;
at::Tensor & celu_(at::Tensor & self, at::Scalar alpha) const override;
at::Tensor sigmoid(const at::Tensor & self) const override;
at::Tensor & sigmoid_(at::Tensor & self) const override;
at::Tensor & sigmoid_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor sin(const at::Tensor & self) const override;
at::Tensor & sin_(at::Tensor & self) const override;
at::Tensor & sin_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor sinh(const at::Tensor & self) const override;
at::Tensor & sinh_(at::Tensor & self) const override;
at::Tensor & sinh_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor detach(const at::Tensor & self) const override;
at::Tensor & detach_(at::Tensor & self) const override;
int64_t size(const at::Tensor & self, int64_t dim) const override;
at::Tensor slice(const at::Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) const override;
std::tuple<at::Tensor,at::Tensor> slogdet(const at::Tensor & self) const override;
at::Tensor smm(const at::Tensor & self, const at::Tensor & mat2) const override;
at::Tensor softmax(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor softmax(const at::Tensor & self, int64_t dim) const override;
at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) const override;
at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) const override;
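// Out-variants backing sparse add/div/mul dispatch.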
at::Tensor & _sparse_add_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor & _sparse_dense_add_out(at::Tensor & result, const at::Tensor & self, at::SparseTensorRef other, at::Scalar alpha) const override;
at::Tensor & _sparse_div_zerodim_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & _sparse_div_scalar_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor & _sparse_mul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & _sparse_mul_zerodim_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & _sparse_mul_scalar_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim) const override;
std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntList split_sizes, int64_t dim) const override;
at::Tensor squeeze(const at::Tensor & self) const override;
at::Tensor squeeze(const at::Tensor & self, int64_t dim) const override;
at::Tensor & squeeze_(at::Tensor & self) const override;
at::Tensor & squeeze_(at::Tensor & self, int64_t dim) const override;
at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & sspaddmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor stack(at::TensorList tensors, int64_t dim) const override;
at::Tensor & stack_out(at::Tensor & result, at::TensorList tensors, int64_t dim) const override;
at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const at::Tensor & window, bool normalized, bool onesided) const override;
int64_t stride(const at::Tensor & self, int64_t dim) const override;
at::Tensor sum(const at::Tensor & self, at::ScalarType dtype) const override;
at::Tensor sum(const at::Tensor & self) const override;
at::Tensor sum(const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor sum(const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor sum(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const override;
at::Tensor & sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor & sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor & sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const override;
at::Tensor sum_to_size(const at::Tensor & self, at::IntList size) const override;
at::Tensor sqrt(const at::Tensor & self) const override;
at::Tensor & sqrt_(at::Tensor & self) const override;
at::Tensor & sqrt_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor std(const at::Tensor & self, bool unbiased) const override;
at::Tensor std(const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const override;
at::Tensor & std_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const override;
at::Tensor prod(const at::Tensor & self, at::ScalarType dtype) const override;
at::Tensor prod(const at::Tensor & self) const override;
at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor prod(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor & prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor & prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const override;
at::Tensor & prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const override;
at::Tensor t(const at::Tensor & self) const override;
at::Tensor & t_(at::Tensor & self) const override;
at::Tensor tan(const at::Tensor & self) const override;
at::Tensor & tan_(at::Tensor & self) const override;
at::Tensor & tan_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor tanh(const at::Tensor & self) const override;
at::Tensor & tanh_(at::Tensor & self) const override;
at::Tensor & tanh_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntList dims_self, at::IntList dims_other) const override;
at::Tensor threshold(const at::Tensor & self, at::Scalar threshold, at::Scalar value) const override;
at::Tensor & threshold_(at::Tensor & self, at::Scalar threshold, at::Scalar value) const override;
at::Tensor & threshold_out(at::Tensor & result, const at::Tensor & self, at::Scalar threshold, at::Scalar value) const override;
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar threshold) const override;
at::Tensor transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) const override;
at::Tensor & transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) const override;
at::Tensor one_hot(const at::Tensor & self, int64_t num_classes) const override;
at::Tensor flip(const at::Tensor & self, at::IntList dims) const override;
at::Tensor roll(const at::Tensor & self, at::IntList shifts, at::IntList dims) const override;
at::Tensor rot90(const at::Tensor & self, int64_t k, at::IntList dims) const override;
at::Tensor _trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntList expand1, at::IntList expand2, at::IntList expand3, at::IntList sumdim, int64_t unroll_dim) const override;
at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) const override;
at::Tensor trunc(const at::Tensor & self) const override;
at::Tensor & trunc_(at::Tensor & self) const override;
at::Tensor & trunc_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor type_as(const at::Tensor & self, const at::Tensor & other) const override;
std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted, bool return_inverse) const override;
std::tuple<at::Tensor,at::Tensor> _unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse) const override;
at::Tensor _unsafe_view(const at::Tensor & self, at::IntList size) const override;
at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) const override;
at::Tensor & unsqueeze_(at::Tensor & self, int64_t dim) const override;
at::Tensor var(const at::Tensor & self, bool unbiased) const override;
at::Tensor var(const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const override;
at::Tensor & var_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const override;
at::Tensor view_as(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor _s_where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) const override;
at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) const override;
std::tuple<at::Tensor,at::Tensor> _weight_norm_cuda_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) const override;
std::tuple<at::Tensor,at::Tensor> _weight_norm_cuda_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) const override;
std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) const override;
at::Tensor & zeros_out(at::Tensor & result, at::IntList size) const override;
at::Tensor zeros_like(const at::Tensor & self) const override;
at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) const override;
at::Tensor _standard_gamma(const at::Tensor & self, at::Generator * generator) const override;
at::Tensor poisson(const at::Tensor & self, at::Generator * generator) const override;
at::Tensor native_norm(const at::Tensor & self, at::Scalar p) const override;
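// Sparse sum reductions and their backward.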
at::Tensor _sparse_sum(const at::Tensor & self) const override;
at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype) const override;
at::Tensor _sparse_sum(const at::Tensor & self, at::IntList dim) const override;
at::Tensor _sparse_sum(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const override;
at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntList dim) const override;
at::Tensor norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::ScalarType dtype) const override;
at::Tensor norm(const at::Tensor & self, at::Scalar p) const override;
at::Tensor norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim) const override;
at::Tensor & norm_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim, at::ScalarType dtype) const override;
at::Tensor & norm_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim) const override;
at::Tensor frobenius_norm(const at::Tensor & self) const override;
at::Tensor frobenius_norm(const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor & frobenius_norm_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const override;
at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim) const override;
at::Tensor & nuclear_norm_out(at::Tensor & result, const at::Tensor & self, bool keepdim) const override;
at::Tensor native_clone(const at::Tensor & self) const override;
at::Tensor clone(const at::Tensor & self) const override;
at::Tensor & native_resize_as_(at::Tensor & self, const at::Tensor & the_template) const override;
at::Tensor & resize_as_(at::Tensor & self, const at::Tensor & the_template) const override;
at::Tensor & native_pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const override;
at::Tensor native_pow(const at::Tensor & self, at::Scalar exponent) const override;
at::Tensor & pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const override;
at::Tensor pow(const at::Tensor & self, at::Scalar exponent) const override;
at::Tensor & native_zero_(at::Tensor & self) const override;
at::Tensor & zero_(at::Tensor & self) const override;
at::Tensor & sub_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor sub(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor sub(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const override;
at::Tensor & sub_(at::Tensor & self, at::Scalar other, at::Scalar alpha) const override;
at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const override;
at::Tensor rsub(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const override;
at::Tensor & s_native_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor s_native_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & s_native_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & sparse, const at::Tensor & dense, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const override;
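// Sparse COO tensor construction and sparse accessor declarations.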
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntList size, const at::TensorOptions & options) const override;
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntList size, const at::Tensor & indices, const at::Tensor & values, const at::TensorOptions & options) const override;
at::Tensor & sparse_resize_(at::Tensor & self, at::IntList size, int64_t sparse_dim, int64_t dense_dim) const override;
at::Tensor & sparse_resize_and_clear_(at::Tensor & self, at::IntList size, int64_t sparse_dim, int64_t dense_dim) const override;
at::Tensor sparse_mask(const at::Tensor & self, at::SparseTensorRef mask) const override;
at::Tensor to_dense(const at::Tensor & self) const override;
int64_t sparse_dim(const at::Tensor & self) const override;
int64_t _dimI(const at::Tensor & self) const override;
int64_t dense_dim(const at::Tensor & self) const override;
int64_t _dimV(const at::Tensor & self) const override;
int64_t _nnz(const at::Tensor & self) const override;
at::Tensor coalesce(const at::Tensor & self) const override;
bool is_coalesced(const at::Tensor & self) const override;
at::Tensor _indices(const at::Tensor & self) const override;
at::Tensor _values(const at::Tensor & self) const override;
at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) const override;
at::Tensor indices(const at::Tensor & self) const override;
at::Tensor values(const at::Tensor & self) const override;
at::Tensor & hspmm_out(at::Tensor & result, const at::Tensor & mat1, const at::Tensor & mat2) const override;
at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) const override;
at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) const override;
int64_t numel(const at::Tensor & self) const override;
std::vector<at::Tensor> unbind(const at::Tensor & self, int64_t dim) const override;
at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim) const override;
at::Tensor to_sparse(const at::Tensor & self) const override;
at::Tensor to(const at::Tensor & self, const at::TensorOptions & options, bool non_blocking, bool copy) const override;
at::Tensor to(const at::Tensor & self, c10::Device device, at::ScalarType dtype, bool non_blocking, bool copy) const override;
at::Tensor to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy) const override;
at::Tensor to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy) const override;
std::vector<at::Tensor> meshgrid(at::TensorList tensors) const override;
at::Tensor cartesian_prod(at::TensorList tensors) const override;
at::Tensor combinations(const at::Tensor & self, int64_t r, bool with_replacement) const override;
at::Scalar item(const at::Tensor & self) const override;
at::Scalar _local_scalar_dense(const at::Tensor & self) const override;
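// Fused RNN cell kernels and LSTM/GRU/RNN entry points.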
std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const at::Tensor & input_bias, const at::Tensor & hidden_bias) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const at::Tensor & grad_hy, const at::Tensor & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) const override;
std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const at::Tensor & input_bias, const at::Tensor & hidden_bias) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const override;
std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const override;
std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const override;
std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const override;
std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const override;
at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const override;
at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const override;
at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> quantized_lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const override;
at::Tensor quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const override;
at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const override;
at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const override;
std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) const override;
at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntList input_size, const at::Tensor & batch_sizes, bool batch_first) const override;
std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, at::Scalar padding_value, int64_t total_length) const override;
void* data_ptr(const at::Tensor & self) const override;
at::Tensor & set_(at::Tensor & self, at::Storage source) const override;
at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntList size, at::IntList stride) const override;
at::Tensor & set_(at::Tensor & self, const at::Tensor & source) const override;
at::Tensor & set_(at::Tensor & self) const override;
bool is_set_to(const at::Tensor & self, const at::Tensor & tensor) const override;
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const override;
at::Tensor & masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const override;
at::Tensor & masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const override;
at::Tensor view(const at::Tensor & self, at::IntList size) const override;
at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) const override;
at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const override;
at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const override;
at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) const override;
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const override;
at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const override;
at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const override;
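// In-place comparison operators.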
at::Tensor & lt_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & lt_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & gt_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & gt_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & le_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & le_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & ge_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & ge_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & eq_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & eq_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & ne_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & ne_(at::Tensor & self, const at::Tensor & other) const override;
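// Bitwise and shift operator overloads.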
at::Tensor __and__(const at::Tensor & self, at::Scalar other) const override;
at::Tensor __and__(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & __iand__(at::Tensor & self, at::Scalar other) const override;
at::Tensor & __iand__(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor __or__(const at::Tensor & self, at::Scalar other) const override;
at::Tensor __or__(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & __ior__(at::Tensor & self, at::Scalar other) const override;
at::Tensor & __ior__(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor __xor__(const at::Tensor & self, at::Scalar other) const override;
at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & __ixor__(at::Tensor & self, at::Scalar other) const override;
at::Tensor & __ixor__(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor __lshift__(const at::Tensor & self, at::Scalar other) const override;
at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & __ilshift__(at::Tensor & self, at::Scalar other) const override;
at::Tensor & __ilshift__(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor __rshift__(const at::Tensor & self, at::Scalar other) const override;
at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & __irshift__(at::Tensor & self, at::Scalar other) const override;
at::Tensor & __irshift__(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & lgamma_(at::Tensor & self) const override;
at::Tensor & atan2_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & tril_(at::Tensor & self, int64_t diagonal) const override;
at::Tensor & triu_(at::Tensor & self, int64_t diagonal) const override;
at::Tensor & digamma_(at::Tensor & self) const override;
at::Tensor & polygamma_(at::Tensor & self, int64_t n) const override;
at::Tensor & erfinv_(at::Tensor & self) const override;
at::Tensor & frac_(at::Tensor & self) const override;
at::Tensor & renorm_(at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
at::Tensor & reciprocal_(at::Tensor & self) const override;
at::Tensor & neg_(at::Tensor & self) const override;
at::Tensor & pow_(at::Tensor & self, at::Scalar exponent) const override;
at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent) const override;
at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
at::Tensor & sign_(at::Tensor & self) const override;
at::Tensor & fmod_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & remainder_(at::Tensor & self, at::Scalar other) const override;
at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const override;
at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
at::Tensor & random_(at::Tensor & self, int64_t from, int64_t to, at::Generator * generator) const override;
at::Tensor & random_(at::Tensor & self, int64_t to, at::Generator * generator) const override;
at::Tensor & random_(at::Tensor & self, at::Generator * generator) const override;
at::Tensor & uniform_(at::Tensor & self, double from, double to, at::Generator * generator) const override;
at::Tensor & normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const override;
at::Tensor & cauchy_(at::Tensor & self, double median, double sigma, at::Generator * generator) const override;
at::Tensor & log_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const override;
at::Tensor & exponential_(at::Tensor & self, double lambd, at::Generator * generator) const override;
at::Tensor & geometric_(at::Tensor & self, double p, at::Generator * generator) const override;
at::Tensor & diag_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const override;
at::Tensor diag(const at::Tensor & self, int64_t diagonal) const override;
at::Tensor & cross_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, int64_t dim) const override;
at::Tensor cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) const override;
at::Tensor & triu_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const override;
at::Tensor triu(const at::Tensor & self, int64_t diagonal) const override;
at::Tensor & tril_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const override;
at::Tensor tril(const at::Tensor & self, int64_t diagonal) const override;
at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, const at::TensorOptions & options) const override;
at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, const at::TensorOptions & options) const override;
at::Tensor trace(const at::Tensor & self) const override;
at::Tensor & ne_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor ne(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor ne(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & eq_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor eq(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor eq(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & ge_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor ge(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor ge(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & le_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor le(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor le(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & gt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor gt(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor gt(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & lt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor lt(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor lt(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & take_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & index) const override;
at::Tensor take(const at::Tensor & self, const at::Tensor & index) const override;
at::Tensor & index_select_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
at::Tensor & masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const override;
at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) const override;
at::Tensor & nonzero_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor nonzero(const at::Tensor & self) const override;
at::Tensor & gather_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index) const override;
at::Tensor & addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
at::Tensor & addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const override;
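// LAPACK-style linear algebra: least squares, eigendecompositions, and factorizations.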
std::tuple<at::Tensor &,at::Tensor &> gels_out(at::Tensor & X, at::Tensor & qr, const at::Tensor & self, const at::Tensor & A) const override;
std::tuple<at::Tensor,at::Tensor> gels(const at::Tensor & self, const at::Tensor & A) const override;
std::tuple<at::Tensor &,at::Tensor &> trtrs_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const override;
std::tuple<at::Tensor,at::Tensor> trtrs(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const override;
std::tuple<at::Tensor &,at::Tensor &> symeig_out(at::Tensor & e, at::Tensor & V, const at::Tensor & self, bool eigenvectors, bool upper) const override;
std::tuple<at::Tensor,at::Tensor> symeig(const at::Tensor & self, bool eigenvectors, bool upper) const override;
std::tuple<at::Tensor &,at::Tensor &> eig_out(at::Tensor & e, at::Tensor & v, const at::Tensor & self, bool eigenvectors) const override;
std::tuple<at::Tensor,at::Tensor> eig(const at::Tensor & self, bool eigenvectors) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some, bool compute_uv) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some, bool compute_uv) const override;
at::Tensor & cholesky_out(at::Tensor & result, const at::Tensor & self, bool upper) const override;
at::Tensor cholesky(const at::Tensor & self, bool upper) const override;
at::Tensor _cholesky_helper(const at::Tensor & self, bool upper) const override;
at::Tensor & cholesky_solve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, bool upper) const override;
at::Tensor cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper) const override;
at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) const override;
at::Tensor & potri_out(at::Tensor & result, const at::Tensor & self, bool upper) const override;
at::Tensor potri(const at::Tensor & self, bool upper) const override;
std::tuple<at::Tensor &,at::Tensor &> pstrf_out(at::Tensor & u, at::Tensor & piv, const at::Tensor & self, bool upper, at::Scalar tol) const override;
std::tuple<at::Tensor,at::Tensor> pstrf(const at::Tensor & self, bool upper, at::Scalar tol) const override;
std::tuple<at::Tensor &,at::Tensor &> qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self) const override;
std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self) const override;
std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & result0, at::Tensor & result1, const at::Tensor & self) const override;
std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self) const override;
at::Tensor & orgqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2) const override;
at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2) const override;
at::Tensor & ormqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const override;
at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const override;
std::tuple<at::Tensor &,at::Tensor &> btrifact_out(at::Tensor & A_LU, at::Tensor & pivots, const at::Tensor & self, bool pivot) const override;
std::tuple<at::Tensor,at::Tensor> btrifact(const at::Tensor & self, bool pivot) const override;
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> btrifact_with_info_out(at::Tensor & A_LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool pivot) const override;
std::tuple<at::Tensor,at::Tensor,at::Tensor> btrifact_with_info(const at::Tensor & self, bool pivot) const override;
at::Tensor & btrisolve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const override;
at::Tensor btrisolve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const override;
at::Tensor & multinomial_out(at::Tensor & result, const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const override;
at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const override;
at::Tensor & lgamma_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor lgamma(const at::Tensor & self) const override;
at::Tensor & digamma_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor digamma(const at::Tensor & self) const override;
at::Tensor & polygamma_out(at::Tensor & result, int64_t n, const at::Tensor & self) const override;
at::Tensor polygamma(int64_t n, const at::Tensor & self) const override;
at::Tensor & erfinv_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor erfinv(const at::Tensor & self) const override;
at::Tensor & frac_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor frac(const at::Tensor & self) const override;
at::Tensor dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const override;
at::Tensor & reciprocal_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor reciprocal(const at::Tensor & self) const override;
at::Tensor & neg_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor neg(const at::Tensor & self) const override;
at::Tensor & atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor atan2(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const override;
at::Tensor & histc_out(at::Tensor & result, const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const override;
at::Tensor histc(const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const override;
at::Tensor & sign_out(at::Tensor & result, const at::Tensor & self) const override;
at::Tensor sign(const at::Tensor & self) const override;
at::Tensor & fmod_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor fmod(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & remainder_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const override;
at::Tensor remainder(const at::Tensor & self, at::Scalar other) const override;
at::Tensor & remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor remainder(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor min(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor min(const at::Tensor & self) const override;
at::Tensor & max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor max(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor max(const at::Tensor & self) const override;
at::Tensor median(const at::Tensor & self) const override;
std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool descending) const override;
std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, int64_t dim, bool descending) const override;
std::tuple<at::Tensor &,at::Tensor &> topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const override;
std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const override;
at::Tensor all(const at::Tensor & self) const override;
at::Tensor any(const at::Tensor & self) const override;
at::Tensor & renorm_out(at::Tensor & result, const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
at::Tensor renorm(const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const override;
at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const override;
bool equal(const at::Tensor & self, const at::Tensor & other) const override;
at::Tensor & pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const override;
at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) const override;
at::Tensor & pow_out(at::Tensor & result, at::Scalar self, const at::Tensor & exponent) const override;
at::Tensor pow(at::Scalar self, const at::Tensor & exponent) const override;
at::Tensor & normal_out(at::Tensor & output, const at::Tensor & mean, double std, at::Generator * generator) const override;
at::Tensor normal(const at::Tensor & mean, double std, at::Generator * generator) const override;
at::Tensor & normal_out(at::Tensor & output, double mean, const at::Tensor & std, at::Generator * generator) const override;
at::Tensor normal(double mean, const at::Tensor & std, at::Generator * generator) const override;
at::Tensor & normal_out(at::Tensor & output, const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const override;
at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const override;
at::Tensor alias(const at::Tensor & self) const override;
at::Tensor & _dirichlet_grad_out(at::Tensor & output, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const override;
at::Tensor _dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const override;
at::Tensor & binary_cross_entropy_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & mse_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & l1_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & multi_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const override; | |
at::Tensor & multilabel_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const override; | |
at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const override; | |
at::Tensor & nll_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor & nll_loss2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const override; | |
at::Tensor & smooth_l1_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & soft_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const override; | |
at::Tensor & elu_out(at::Tensor & output, const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor elu(const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const override; | |
at::Tensor elu_backward(const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const override; | |
at::Tensor & elu_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const override; | |
at::Tensor & glu_out(at::Tensor & output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor glu(const at::Tensor & self, int64_t dim) const override; | |
at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const override; | |
at::Tensor & hardtanh_out(at::Tensor & output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor hardtanh(const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor & hardtanh_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const override; | |
at::Tensor & leaky_relu_out(at::Tensor & output, const at::Tensor & self, at::Scalar negative_slope) const override; | |
at::Tensor leaky_relu(const at::Tensor & self, at::Scalar negative_slope) const override; | |
at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const override; | |
at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const override; | |
at::Tensor & leaky_relu_(at::Tensor & self, at::Scalar negative_slope) const override; | |
at::Tensor & log_sigmoid_out(at::Tensor & output, const at::Tensor & self) const override; | |
at::Tensor log_sigmoid(const at::Tensor & self) const override; | |
std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) const override; | |
std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self) const override; | |
at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const override; | |
at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const override; | |
at::Tensor & rrelu_with_noise_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override; | |
at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override; | |
at::Tensor & rrelu_with_noise_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const override; | |
at::Tensor rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const override; | |
at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const override; | |
at::Tensor & softplus_out(at::Tensor & output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const override; | |
at::Tensor softplus(const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const override; | |
at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const override; | |
at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const override; | |
at::Tensor & softshrink_out(at::Tensor & output, const at::Tensor & self, at::Scalar lambd) const override; | |
at::Tensor softshrink(const at::Tensor & self, at::Scalar lambd) const override; | |
at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const override; | |
at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const override; | |
at::Tensor & adaptive_avg_pool2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & adaptive_avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const override; | |
at::Tensor adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) const override; | |
at::Tensor & adaptive_avg_pool3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const override; | |
at::Tensor adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) const override; | |
std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const override; | |
std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override; | |
at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override; | |
std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const override; | |
std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override; | |
at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const override; | |
at::Tensor & avg_pool2d_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor avg_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor & avg_pool3d_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor avg_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const override; | |
std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const override; | |
std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const override; | |
at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const override; | |
at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const override; | |
std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const override; | |
std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const override; | |
at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const override; | |
at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const override; | |
std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override; | |
std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override; | |
at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override; | |
at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override; | |
std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override; | |
std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const override; | |
at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override; | |
at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const override; | |
at::Tensor & max_unpool2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override; | |
at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override; | |
at::Tensor & max_unpool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override; | |
at::Tensor max_unpool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const override; | |
at::Tensor & max_unpool3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override; | |
at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override; | |
at::Tensor & max_unpool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override; | |
at::Tensor max_unpool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const override; | |
at::Tensor & reflection_pad1d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor reflection_pad1d(const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & reflection_pad2d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor reflection_pad2d(const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad1d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad1d(const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad2d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad2d(const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad3d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad3d(const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const override; | |
at::Tensor & upsample_linear1d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor upsample_linear1d(const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor & upsample_bilinear2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor & upsample_bicubic2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor & upsample_trilinear3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntList output_size, bool align_corners) const override; | |
at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const override; | |
at::Tensor & upsample_nearest1d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor & upsample_nearest2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor & upsample_nearest3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntList output_size) const override; | |
at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const override; | |
at::Tensor & sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const override; | |
at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) const override; | |
at::Tensor & tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const override; | |
at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) const override; | |
at::Tensor & thnn_conv_transpose2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_transpose2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_transpose2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_transpose2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_transpose2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override; | |
at::Tensor & thnn_conv_transpose3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_transpose3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_transpose3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_transpose3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_transpose3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
at::Tensor & thnn_conv2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv2d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
at::Tensor & thnn_conv_depthwise2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
at::Tensor & thnn_conv_depthwise2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_depthwise2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &> thnn_conv_depthwise2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor,at::Tensor> thnn_conv_depthwise2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, std::array<bool,2> output_mask) const override; | |
at::Tensor & thnn_conv3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
at::Tensor thnn_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
at::Tensor & thnn_conv_dilated2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_dilated2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_dilated2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_dilated2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_dilated2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override; | |
at::Tensor & thnn_conv_dilated3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
at::Tensor thnn_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_dilated3d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_dilated3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const override; | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> thnn_conv_dilated3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const override; | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> thnn_conv_dilated3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const override; | |
at::Tensor thnn_col2im(const at::Tensor & self, at::IntList output_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override; | |
at::Tensor thnn_col2im_backward(const at::Tensor & grad_output, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override; | |
at::Tensor thnn_im2col(const at::Tensor & self, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override; | |
at::Tensor thnn_im2col_backward(const at::Tensor & grad_output, at::IntList input_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const override; | |
}; | |
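// ---------------------------------------------------------------------------
// Definitions. Every wrapper below follows the same generated pattern:
// unwrap the XLA-wrapped arguments (bridge::XlaToAtenTensor for read-only
// inputs, bridge::XlaToAtenMutableTensor for tensors the op mutates),
// forward to the corresponding at::_th_* fallback, and hand the result back
// to the XLA side. An illustrative sketch of that shape, assuming only the
// helper names already used in this file (this comment block is not itself
// emitted by gen.py, and some_op is a placeholder):
//
//   at::Tensor XLATypeBase::some_op(const at::Tensor & self) const {
//     auto r_self = bridge::XlaToAtenTensor(self);   // unwrap the input
//     auto&& x_result = at::some_op(r_self);         // run the fallback
//     // re-wrap the output on the input tensor's XLA device
//     return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
//   }
// ---------------------------------------------------------------------------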
at::Tensor & XLATypeBase::_th_set_(at::Tensor & self, at::Storage source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::_th_set_(w_self, source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntList size, at::IntList stride) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::_th_set_(w_self, source, storage_offset, size, stride); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_set_(at::Tensor & self, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::_th_set_(w_self, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_set_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::_th_set_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
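// In-place wrappers (trailing-underscore ops such as the _th_set_ overloads
// above) unwrap `self` as mutable, let the fallback mutate it, discard the
// fallback's return value, and return the original XLA-side `self` so the
// caller keeps holding the wrapped tensor.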
at::Tensor & XLATypeBase::_th_fill_(at::Tensor & self, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::_th_fill_(w_self, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_fill_(at::Tensor & self, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::_th_fill_(w_self, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
bool XLATypeBase::_th_is_set_to(const at::Tensor & self, const at::Tensor & tensor) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor = bridge::XlaToAtenTensor(tensor); | |
auto&& x_result = at::_th_is_set_to(r_self, r_tensor); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return x_result; | |
} | |
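// Wrappers with scalar results (e.g. the bool-returning _th_is_set_to above)
// return the fallback's value directly; there is no tensor to re-wrap.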
at::Tensor & XLATypeBase::_th_masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::_th_masked_fill_(w_self, r_mask, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
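// The s__th_* overloads below are the generator's companions to the plain
// _th_* entry points (in this vintage of ATen the s_-prefixed variants are,
// to the best of our reading, the ones that skip argument broadcasting).
// ATen exposes no free at::s__th_* functions, so these dispatch dynamically
// through at::detail::infer_type() on the unwrapped tensor instead.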
at::Tensor & XLATypeBase::s__th_masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::detail::infer_type(w_self).s__th_masked_fill_(w_self, r_mask, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::_th_masked_fill_(w_self, r_mask, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::s__th_masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::detail::infer_type(w_self).s__th_masked_fill_(w_self, r_mask, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::_th_masked_scatter_(w_self, r_mask, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::s__th_masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::detail::infer_type(w_self).s__th_masked_scatter_(w_self, r_mask, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
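// _out wrappers unwrap the destination tensor as mutable (w_result), let the
// fallback write through it, and return the caller's wrapped `result`
// argument unchanged.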
at::Tensor & XLATypeBase::_th_masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::_th_masked_select_out(w_result, r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::s__th_masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_masked_select_out(w_result, r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
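// Value-returning wrappers re-wrap the fallback's output with
// bridge::CreateXlaTensor, using bridge::XlaTensorDevice(self) so that, as
// the helper names suggest, the new tensor is placed on the same XLA device
// as the input.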
at::Tensor XLATypeBase::_th_masked_select(const at::Tensor & self, const at::Tensor & mask) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::_th_masked_select(r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::s__th_masked_select(const at::Tensor & self, const at::Tensor & mask) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_masked_select(r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_nonzero_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_nonzero_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_nonzero(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_nonzero(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_th_clone(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_clone(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_th_view(const at::Tensor & self, at::IntList size) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_view(r_self, size); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_resize_as_(at::Tensor & self, const at::Tensor & the_template) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_the_template = bridge::XlaToAtenTensor(the_template); | |
auto&& x_result = at::_th_resize_as_(w_self, r_the_template); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_index_select_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::_th_index_select_out(w_result, r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::_th_index_select(r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::_th_index_copy_(w_self, dim, r_index, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_take_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & index) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::_th_take_out(w_result, r_self, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_take(const at::Tensor & self, const at::Tensor & index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::_th_take(r_self, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::_th_put_(w_self, r_index, r_source, accumulate); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::_th_index_add_(w_self, dim, r_index, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::_th_index_fill_(w_self, dim, r_index, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::_th_index_fill_(w_self, dim, r_index, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_unfold_out(at::Tensor & result, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_unfold_out(w_result, r_self, dimension, size, step); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_unfold(r_self, dimension, size, step); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto r_src = bridge::XlaToAtenTensor(src);
  auto&& x_result = at::_th_scatter_(w_self, dim, r_index, r_src);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto&& x_result = at::_th_scatter_(w_self, dim, r_index, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto r_src = bridge::XlaToAtenTensor(src);
  auto&& x_result = at::_th_scatter_add_(w_self, dim, r_index, r_src);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_gather_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto&& x_result = at::_th_gather_out(w_result, r_self, dim, r_index);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_gather(const at::Tensor & self, int64_t dim, const at::Tensor & index) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto&& x_result = at::_th_gather(r_self, dim, r_index);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
bool XLATypeBase::_th_equal(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_equal(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
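// NOTE: Ops with a non-tensor result, like the bool-returning _th_equal
// above, need no rewrapping; the fallback's value is returned directly.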
at::Tensor & XLATypeBase::_th_and_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_and_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_and(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_and(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_and_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_and_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_and_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_and_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_and(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_and(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_and(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_and(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
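// NOTE: The s__th_* overloads are the companions of the broadcasting _th_*
// entry points (in this vintage of ATen the s_ prefix appears to mark the
// variants that assume already-broadcast, same-shape operands). Instead of
// calling a free function, they dispatch through at::detail::infer_type(...)
// on an unwrapped ATen tensor, which resolves the concrete at::Type
// registered for it and invokes its s__th_* method.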
at::Tensor & XLATypeBase::_th_iand_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_iand_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_iand_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_iand_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_iand_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_iand_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
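// NOTE: For in-place strided variants such as s__th_iand_, the type is
// inferred from the unwrapped mutable self (w_self) rather than from a
// read-only operand; the shape of the wrapper is otherwise identical.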
at::Tensor & XLATypeBase::_th_or_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_or_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_or(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_or(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_or_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_or_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_or_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_or_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_or(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_or(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_or(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_or(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ior_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_ior_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ior_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ior_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_ior_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_ior_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_xor_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_xor_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_xor(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_xor(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_xor_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_xor_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_xor_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_xor_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_xor(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_xor(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_xor(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_xor(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ixor_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_ixor_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ixor_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ixor_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_ixor_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_ixor_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_lshift_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lshift_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lshift(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lshift(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_lshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_lshift_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_lshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lshift_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lshift(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_lshift(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_lshift(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lshift(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ilshift_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_ilshift_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ilshift_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ilshift_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_ilshift_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_ilshift_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_rshift_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_rshift_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_rshift(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_rshift(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_rshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_rshift_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_rshift_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_rshift_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_rshift(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_rshift(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_rshift(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_rshift(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_irshift_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_irshift_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_irshift_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_irshift_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_irshift_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_irshift_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_lt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lt_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lt(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lt(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_lt_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lt_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lt(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_lt(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_lt(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lt(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_lt_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_lt_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_lt_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_lt_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_lt_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_lt_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_gt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_gt_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_gt(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_gt(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_gt_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_gt_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_gt(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_gt(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_gt(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_gt(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_gt_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_gt_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_gt_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_gt_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_gt_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_gt_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_le_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_le_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_le(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_le(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_le_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_le_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_le(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_le(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_le(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_le(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_le_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_le_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_le_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_le_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_le_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_le_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ge_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ge_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ge(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ge(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ge_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_ge_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ge(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ge(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_ge(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_ge(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ge_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_ge_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ge_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ge_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_ge_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_ge_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_eq_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_eq_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_eq(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_eq(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_eq_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_eq_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_eq(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_eq(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_eq(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_eq(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_eq_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_eq_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_eq_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_eq_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_eq_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_eq_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ne_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ne_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ne(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ne(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ne_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_ne_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ne(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ne(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_ne(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_ne(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ne_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_ne_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_ne_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_ne_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_ne_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_ne_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_min_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_min_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_min(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_min(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_min(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_min(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_th_min(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_min(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_min = bridge::XlaToAtenMutableTensor(min);
  auto w_min_indices = bridge::XlaToAtenMutableTensor(min_indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_min_out(w_min, w_min_indices, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
}
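// NOTE: Multi-output _out variants unwrap every output buffer mutably and
// return a tuple of the original XLA references, e.g. (min, min_indices)
// above.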
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_min(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_min(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
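// NOTE: Value-returning multi-output ops rewrap each tuple element
// separately: std::get<0>/std::get<1> of the fallback result are converted
// with bridge::CreateXlaTensor on the device of `self` before being repacked.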
at::Tensor & XLATypeBase::_th_max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::_th_max_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::s__th_max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_max_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_max(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::_th_max(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::s__th_max(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_max(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_th_max(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_max(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_max_out(at::Tensor & max, at::Tensor & max_indices, const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto w_max = bridge::XlaToAtenMutableTensor(max); | |
auto w_max_indices = bridge::XlaToAtenMutableTensor(max_indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_max_out(w_max, w_max_indices, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(max, max_indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_max(const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_max(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_kthvalue_out(w_values, w_indices, r_self, k, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_kthvalue(r_self, k, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_mode_out(w_values, w_indices, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_mode(const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_mode(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor XLATypeBase::_th_median(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_median(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_median_out(w_values, w_indices, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_median(const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_median(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool descending) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_sort_out(w_values, w_indices, r_self, dim, descending); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_sort(const at::Tensor & self, int64_t dim, bool descending) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_sort(r_self, dim, descending); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_topk_out(w_values, w_indices, r_self, k, dim, largest, sorted); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_topk(r_self, k, dim, largest, sorted); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor XLATypeBase::_th_any(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_any(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_any_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_any_out(w_result, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_any(const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_any(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_abs_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_abs_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_abs(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_abs(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_sigmoid_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_sigmoid_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_sigmoid(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_th_sigmoid(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_log_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_log(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_log10_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log10_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_log10(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log10(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_log1p_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log1p_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_log1p(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log1p(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_log2_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log2_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_log2(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_log2(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_lgamma_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lgamma_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lgamma(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_lgamma(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
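// In-place variants (trailing underscore) write through the mutable ATen
// view of `self` and return `self` directly, so no new XLA tensor is built.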
at::Tensor & XLATypeBase::_th_lgamma_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_lgamma_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_digamma_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_digamma_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_digamma(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_digamma(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_digamma_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_digamma_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_polygamma_out(at::Tensor & result, int64_t n, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_polygamma_out(w_result, n, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_polygamma(int64_t n, const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_polygamma(n, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_polygamma_(at::Tensor & self, int64_t n) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_polygamma_(w_self, n);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_exp_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_exp_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_exp(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_exp(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_expm1_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_expm1_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_expm1(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_expm1(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_cos_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cos_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_cos(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cos(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_acos_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_acos_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_acos(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_acos(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_cosh_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cosh_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_cosh(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cosh(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_sin_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sin_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_sin(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sin(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_asin_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_asin_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_asin(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_asin(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_sinh_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sinh_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_sinh(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sinh(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_tan_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_tan_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_tan(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_tan(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_atan_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_atan_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_atan(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_atan(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_tanh_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_tanh_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_tanh(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_tanh(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_erf_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erf_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_erf(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erf(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_erfc_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erfc_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_erfc(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erfc(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_erfinv_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_erfinv_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_erfinv_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erfinv_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_erfinv(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_erfinv(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_sqrt_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sqrt_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_sqrt(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sqrt(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_rsqrt_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_rsqrt_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_rsqrt(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_rsqrt(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ceil_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ceil_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ceil(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_ceil(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_floor_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_floor_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_floor(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_floor(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_round_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_round_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_round(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_round(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_trunc_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_trunc_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_trunc(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_trunc(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_frac_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_frac_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_frac_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_frac_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_frac(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_frac(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_var_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_var_out(w_result, r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_var(const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_var(r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_th_var(const at::Tensor & self, bool unbiased) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_var(r_self, unbiased);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_std_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_std_out(w_result, r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_std(const at::Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_std(r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_th_std(const at::Tensor & self, bool unbiased) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_std(r_self, unbiased);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_renorm_out(at::Tensor & result, const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_renorm_out(w_result, r_self, p, dim, maxnorm);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_renorm(const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_renorm(r_self, p, dim, maxnorm);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_renorm_(at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_renorm_(w_self, p, dim, maxnorm);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_th_dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_dist(r_self, r_other, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
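// The s__-prefixed overloads are the "s_" forms of the TH ops (in ATen's TH
// codegen, the variants that assume operands were already broadcast); they
// are dispatched through at::detail::infer_type() on the unwrapped ATen
// tensor rather than through a free at:: function.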
at::Tensor XLATypeBase::s__th_dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_dist(r_self, r_other, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_reciprocal_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_reciprocal_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_reciprocal(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_reciprocal(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_reciprocal_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_reciprocal_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_neg_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_neg_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_neg(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_neg(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_neg_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_neg_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_atan2_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_atan2_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_atan2(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_atan2(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_atan2(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_atan2(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_atan2_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_atan2_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_atan2_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_atan2_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_pow_out(w_result, r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_pow(const at::Tensor & self, at::Scalar exponent) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_pow(r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::_th_pow_out(w_result, r_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::detail::infer_type(r_self).s__th_pow_out(w_result, r_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_pow(const at::Tensor & self, const at::Tensor & exponent) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::_th_pow(r_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_pow(const at::Tensor & self, const at::Tensor & exponent) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::detail::infer_type(r_self).s__th_pow(r_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
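// In the Scalar-base pow overloads below, `self` is a scalar rather than a
// tensor, so the result is wrapped on the device of `exponent` instead.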
at::Tensor & XLATypeBase::_th_pow_out(at::Tensor & result, at::Scalar self, const at::Tensor & exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::_th_pow_out(w_result, self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_pow(at::Scalar self, const at::Tensor & exponent) const {
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::_th_pow(self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(exponent));
}
at::Tensor & XLATypeBase::_th_pow_(at::Tensor & self, at::Scalar exponent) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_pow_(w_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_pow_(at::Tensor & self, const at::Tensor & exponent) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::_th_pow_(w_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_pow_(at::Tensor & self, const at::Tensor & exponent) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_exponent = bridge::XlaToAtenTensor(exponent);
  auto&& x_result = at::detail::infer_type(w_self).s__th_pow_(w_self, r_exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::_th_lerp_out(w_result, r_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lerp_out(w_result, r_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::_th_lerp(r_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::detail::infer_type(r_self).s__th_lerp(r_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::_th_lerp_(w_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_end = bridge::XlaToAtenTensor(end);
  auto&& x_result = at::detail::infer_type(w_self).s__th_lerp_(w_self, r_end, weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_histc_out(at::Tensor & result, const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_histc_out(w_result, r_self, bins, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_histc(const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_histc(r_self, bins, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_zero_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_zero_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cumsum_out(w_result, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_cumsum(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cumsum(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cumprod_out(w_result, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_cumprod(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_cumprod(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_sign_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sign_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_sign(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_sign(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_sign_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_sign_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_th_trace(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_trace(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_fmod_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_fmod_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_fmod(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_fmod(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_fmod_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_fmod_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_fmod(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_fmod(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_fmod(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_fmod(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_fmod_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_fmod_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_fmod_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_fmod_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_fmod_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_fmod_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_remainder_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_remainder_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_remainder(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_remainder(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_remainder_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_remainder_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_remainder(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_remainder(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_remainder(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(r_self).s__th_remainder(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_remainder_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_remainder_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_remainder_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_remainder_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_remainder_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::detail::infer_type(w_self).s__th_remainder_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_clamp_out(at::Tensor & result, const at::Tensor & self, at::Scalar min, at::Scalar max) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp_out(w_result, r_self, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_clamp(const at::Tensor & self, at::Scalar min, at::Scalar max) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp(r_self, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_clamp_min_out(at::Tensor & result, const at::Tensor & self, at::Scalar min) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp_min_out(w_result, r_self, min);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_clamp_min(const at::Tensor & self, at::Scalar min) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp_min(r_self, min);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_clamp_max_out(at::Tensor & result, const at::Tensor & self, at::Scalar max) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp_max_out(w_result, r_self, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_clamp_max(const at::Tensor & self, at::Scalar max) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_clamp_max(r_self, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_th_dot(const at::Tensor & self, const at::Tensor & tensor) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor = bridge::XlaToAtenTensor(tensor);
  auto&& x_result = at::_th_dot(r_self, r_tensor);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_cross_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_cross_out(w_result, r_self, r_other, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_th_cross(r_self, r_other, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_diag_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_diag_out(w_result, r_self, diagonal);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_diag(const at::Tensor & self, int64_t diagonal) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_diag(r_self, diagonal);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
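// The matrix/vector ops below (addmm, addmv, addr, ger, mv, mm, bmm, addbmm)
// unwrap every tensor operand before forwarding; the beta/alpha scalars are
// passed through unchanged.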
at::Tensor & XLATypeBase::_th_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat1 = bridge::XlaToAtenTensor(mat1); | |
auto r_mat2 = bridge::XlaToAtenTensor(mat2); | |
auto&& x_result = at::_th_addmm_out(w_result, r_self, r_mat1, r_mat2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::s__th_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat1 = bridge::XlaToAtenTensor(mat1); | |
auto r_mat2 = bridge::XlaToAtenTensor(mat2); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addmm_out(w_result, r_self, r_mat1, r_mat2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat1 = bridge::XlaToAtenTensor(mat1); | |
auto r_mat2 = bridge::XlaToAtenTensor(mat2); | |
auto&& x_result = at::_th_addmm(r_self, r_mat1, r_mat2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::s__th_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat1 = bridge::XlaToAtenTensor(mat1); | |
auto r_mat2 = bridge::XlaToAtenTensor(mat2); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addmm(r_self, r_mat1, r_mat2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mat1 = bridge::XlaToAtenTensor(mat1); | |
auto r_mat2 = bridge::XlaToAtenTensor(mat2); | |
auto&& x_result = at::_th_addmm_(w_self, r_mat1, r_mat2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::_th_addmv_out(w_result, r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::s__th_addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addmv_out(w_result, r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::_th_addmv(r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::s__th_addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addmv(r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::_th_addmv_(w_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::_th_addr_out(w_result, r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::s__th_addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addr_out(w_result, r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::_th_addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::_th_addr(r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::s__th_addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::detail::infer_type(r_self).s__th_addr(r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_th_addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::_th_addr_(w_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::_th_ger_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec2 = bridge::XlaToAtenTensor(vec2);
  auto&& x_result = at::_th_ger_out(w_result, r_self, r_vec2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ger(const at::Tensor & self, const at::Tensor & vec2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec2 = bridge::XlaToAtenTensor(vec2);
  auto&& x_result = at::_th_ger(r_self, r_vec2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_mv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec = bridge::XlaToAtenTensor(vec);
  auto&& x_result = at::_th_mv_out(w_result, r_self, r_vec);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_mv(const at::Tensor & self, const at::Tensor & vec) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec = bridge::XlaToAtenTensor(vec);
  auto&& x_result = at::_th_mv(r_self, r_vec);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_mm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::_th_mm_out(w_result, r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_mm(const at::Tensor & self, const at::Tensor & mat2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::_th_mm(r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_bmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::_th_bmm_out(w_result, r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_bmm(const at::Tensor & self, const at::Tensor & mat2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::_th_bmm(r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
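// _th_addbmm and _th_baddbmm below are the fused multiply-accumulate
// kernels: both compute beta * self + alpha * (batch1 @ batch2), with addbmm
// reducing the batch dimension into a single matrix and baddbmm keeping the
// result batched. Per ATen's codegen convention, the s__ overloads are the
// non-broadcasting forms that expect operands already expanded to matching
// sizes; the plain forms handle broadcasting first.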
at::Tensor & XLATypeBase::_th_addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_th_addbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_th_addbmm(r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addbmm(r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_th_addbmm_(w_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_th_baddbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_baddbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_th_baddbmm(r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_baddbmm(r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcmul_out(w_result, r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addcmul_out(w_result, r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcmul(r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addcmul(r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcmul_(w_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(w_self).s__th_addcmul_(w_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcdiv_out(w_result, r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::s__th_addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addcdiv_out(w_result, r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcdiv(r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::s__th_addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(r_self).s__th_addcdiv(r_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::_th_addcdiv_(w_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::s__th_addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_tensor1 = bridge::XlaToAtenTensor(tensor1);
  auto r_tensor2 = bridge::XlaToAtenTensor(tensor2);
  auto&& x_result = at::detail::infer_type(w_self).s__th_addcdiv_(w_self, r_tensor1, r_tensor2, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
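// The LAPACK-style solvers and factorizations follow (gels, trtrs, symeig,
// eig, svd, getri, potri, pstrf, qr, geqrf, orgqr, ormqr, btrifact,
// btrisolve). Multi-output kernels come back as std::tuple; the non-_out
// variants unpack each element with std::get and rewrap it as a new XLA
// tensor on self's device.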
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_gels_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, const at::Tensor & A) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::_th_gels_out(w_res1, w_res2, r_self, r_A);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_gels(const at::Tensor & self, const at::Tensor & A) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::_th_gels(r_self, r_A);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_trtrs_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::_th_trtrs_out(w_res1, w_res2, r_self, r_A, upper, transpose, unitriangular);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_trtrs(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::_th_trtrs(r_self, r_A, upper, transpose, unitriangular);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_symeig_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool eigenvectors, bool upper) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_symeig_out(w_res1, w_res2, r_self, eigenvectors, upper);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_symeig(const at::Tensor & self, bool eigenvectors, bool upper) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_symeig(r_self, eigenvectors, upper);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_eig_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool eigenvectors) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_eig_out(w_res1, w_res2, r_self, eigenvectors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_eig(const at::Tensor & self, bool eigenvectors) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_eig(r_self, eigenvectors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_th_svd_out(at::Tensor & res1, at::Tensor & res2, at::Tensor & res3, const at::Tensor & self, bool some, bool compute_uv) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto w_res3 = bridge::XlaToAtenMutableTensor(res3);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_svd_out(w_res1, w_res2, w_res3, r_self, some, compute_uv);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(res1, res2, res3);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_th_svd(const at::Tensor & self, bool some, bool compute_uv) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_svd(r_self, some, compute_uv);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
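// Illustrative tuple unpacking for one of these wrappers (assuming `m` is a
// 2-D XLA tensor dispatched through this type):
//   at::Tensor u, s, v;
//   std::tie(u, s, v) = m.type()._th_svd(m, /*some=*/true, /*compute_uv=*/true);
// Each of u, s and v comes back as a fresh XLA tensor on m's device.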
at::Tensor & XLATypeBase::_th_getri_single_out(at::Tensor & output, const at::Tensor & self) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_getri_single_out(w_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_getri_single(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_getri_single(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_potri_out(at::Tensor & output, const at::Tensor & self, bool upper) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_potri_out(w_output, r_self, upper);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_potri(const at::Tensor & self, bool upper) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_potri(r_self, upper);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_pstrf_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self, bool upper, at::Scalar tol) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_pstrf_out(w_res1, w_res2, r_self, upper, tol);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_pstrf(const at::Tensor & self, bool upper, at::Scalar tol) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_pstrf(r_self, upper, tol);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_qr_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_qr_out(w_res1, w_res2, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_qr(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_qr(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_geqrf_out(at::Tensor & res1, at::Tensor & res2, const at::Tensor & self) const {
  auto w_res1 = bridge::XlaToAtenMutableTensor(res1);
  auto w_res2 = bridge::XlaToAtenMutableTensor(res2);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_geqrf_out(w_res1, w_res2, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(res1, res2);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_geqrf(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_geqrf(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_th_orgqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto&& x_result = at::_th_orgqr_out(w_result, r_self, r_input2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_orgqr(const at::Tensor & self, const at::Tensor & input2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto&& x_result = at::_th_orgqr(r_self, r_input2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_ormqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto r_input3 = bridge::XlaToAtenTensor(input3);
  auto&& x_result = at::_th_ormqr_out(w_result, r_self, r_input2, r_input3, left, transpose);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto r_input3 = bridge::XlaToAtenTensor(input3);
  auto&& x_result = at::_th_ormqr(r_self, r_input2, r_input3, left, transpose);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_th_btrifact_out(at::Tensor & result, at::Tensor & pivots, const at::Tensor & self, bool pivot) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto w_pivots = bridge::XlaToAtenMutableTensor(pivots);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_btrifact_out(w_result, w_pivots, r_self, pivot);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(result, pivots);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_th_btrifact(const at::Tensor & self, bool pivot) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_btrifact(r_self, pivot);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_th_btrifact_with_info_out(at::Tensor & result, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool pivot) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto w_pivots = bridge::XlaToAtenMutableTensor(pivots);
  auto w_info = bridge::XlaToAtenMutableTensor(info);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_btrifact_with_info_out(w_result, w_pivots, w_info, r_self, pivot);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(result, pivots, info);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_th_btrifact_with_info(const at::Tensor & self, bool pivot) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_btrifact_with_info(r_self, pivot);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_th_btrisolve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_LU_data = bridge::XlaToAtenTensor(LU_data);
  auto r_LU_pivots = bridge::XlaToAtenTensor(LU_pivots);
  auto&& x_result = at::_th_btrisolve_out(w_result, r_self, r_LU_data, r_LU_pivots);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_btrisolve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_LU_data = bridge::XlaToAtenTensor(LU_data);
  auto r_LU_pivots = bridge::XlaToAtenTensor(LU_pivots);
  auto&& x_result = at::_th_btrisolve(r_self, r_LU_data, r_LU_pivots);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
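// The in-place random fills below (_th_random_, _th_uniform_, _th_normal_,
// _th_cauchy_, _th_log_normal_, _th_exponential_, _th_geometric_) mutate
// self through its mutable ATen view and hand back the original XLA
// reference. The at::Generator* may be null, in which case the dispatched
// TH kernel falls back to the backend's default generator.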
at::Tensor & XLATypeBase::_th_random_(at::Tensor & self, int64_t from, int64_t to, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_random_(w_self, from, to, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_random_(at::Tensor & self, int64_t to, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_random_(w_self, to, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_random_(at::Tensor & self, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_random_(w_self, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_multinomial_out(at::Tensor & result, const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_multinomial_out(w_result, r_self, num_samples, replacement, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_th_multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_multinomial(r_self, num_samples, replacement, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_uniform_(at::Tensor & self, double from, double to, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_uniform_(w_self, from, to, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_normal_out(at::Tensor & output, const at::Tensor & mean, double std, at::Generator * generator) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_mean = bridge::XlaToAtenTensor(mean);
  auto&& x_result = at::_th_normal_out(w_output, r_mean, std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_normal(const at::Tensor & mean, double std, at::Generator * generator) const {
  auto r_mean = bridge::XlaToAtenTensor(mean);
  auto&& x_result = at::_th_normal(r_mean, std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(mean));
}
at::Tensor & XLATypeBase::_th_normal_out(at::Tensor & output, double mean, const at::Tensor & std, at::Generator * generator) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_std = bridge::XlaToAtenTensor(std);
  auto&& x_result = at::_th_normal_out(w_output, mean, r_std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_normal(double mean, const at::Tensor & std, at::Generator * generator) const {
  auto r_std = bridge::XlaToAtenTensor(std);
  auto&& x_result = at::_th_normal(mean, r_std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(std));
}
at::Tensor & XLATypeBase::_th_normal_out(at::Tensor & output, const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_mean = bridge::XlaToAtenTensor(mean);
  auto r_std = bridge::XlaToAtenTensor(std);
  auto&& x_result = at::_th_normal_out(w_output, r_mean, r_std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_normal(const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const {
  auto r_mean = bridge::XlaToAtenTensor(mean);
  auto r_std = bridge::XlaToAtenTensor(std);
  auto&& x_result = at::_th_normal(r_mean, r_std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(std));
}
at::Tensor & XLATypeBase::_th_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_normal_(w_self, mean, std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_cauchy_(at::Tensor & self, double median, double sigma, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_cauchy_(w_self, median, sigma, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_log_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_log_normal_(w_self, mean, std, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_exponential_(at::Tensor & self, double lambd, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_exponential_(w_self, lambd, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_geometric_(at::Tensor & self, double p, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_th_geometric_(w_self, p, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_th_dirichlet_grad_out(at::Tensor & output, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_x = bridge::XlaToAtenTensor(x);
  auto r_alpha = bridge::XlaToAtenTensor(alpha);
  auto r_total = bridge::XlaToAtenTensor(total);
  auto&& x_result = at::_th_dirichlet_grad_out(w_output, r_x, r_alpha, r_total);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_th_dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const {
  auto r_x = bridge::XlaToAtenTensor(x);
  auto r_alpha = bridge::XlaToAtenTensor(alpha);
  auto r_total = bridge::XlaToAtenTensor(total);
  auto&& x_result = at::_th_dirichlet_grad(r_x, r_alpha, r_total);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(total));
}
at::Tensor XLATypeBase::_th_alias(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_th_alias(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_th_copy_ignoring_overlaps_(at::Tensor & self, const at::Tensor & src) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_src = bridge::XlaToAtenTensor(src);
  auto&& x_result = at::_th_copy_ignoring_overlaps_(w_self, r_src);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
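// _th_cat is the one wrapper in this stretch that takes an at::TensorList:
// the whole list is converted at once with bridge::XlaCreateTensorList, and
// the output device is inferred from the list rather than from a single
// self tensor (bridge::XlaTensorDevice(tensors)).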
at::Tensor & XLATypeBase::_th_cat_out(at::Tensor & self, at::TensorList tensors, int64_t dim) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::_th_cat_out(w_self, l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_th_cat(at::TensorList tensors, int64_t dim) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::_th_cat(l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(tensors));
}
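// The remaining wrappers cover the THNN loss kernels as forward/backward
// pairs. `reduction` is ATen's Reduction enum passed as int64_t (in this
// ATen snapshot: 0 = none, 1 = mean, 2 = sum), and each backward variant
// takes grad_output plus whatever state its forward produced.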
at::Tensor & XLATypeBase::_thnn_binary_cross_entropy_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_binary_cross_entropy_forward_out(w_output, r_self, r_target, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_binary_cross_entropy_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_binary_cross_entropy_forward(r_self, r_target, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_binary_cross_entropy_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_binary_cross_entropy_backward(r_grad_output, r_self, r_target, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_l1_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_l1_loss_forward_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_l1_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_l1_loss_forward(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_l1_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_l1_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_mse_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_mse_loss_forward_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_mse_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_mse_loss_forward(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_mse_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_mse_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_multi_margin_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_multi_margin_loss_forward_out(w_output, r_self, r_target, p, margin, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_multi_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_multi_margin_loss_forward(r_self, r_target, p, margin, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_multi_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, p, margin, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_multi_margin_loss_backward(r_grad_output, r_self, r_target, p, margin, r_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
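// Losses with auxiliary state (multilabel_margin_loss's is_target,
// nll_loss/nll_loss2d's total_weight) thread that state from forward to
// backward: the forward returns it as the second tuple element and the
// backward consumes it again as a trailing const tensor argument.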
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_is_target = bridge::XlaToAtenMutableTensor(is_target); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::_thnn_multilabel_margin_loss_forward_out(w_output, w_is_target, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, is_target); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::_thnn_multilabel_margin_loss_forward(r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::_thnn_multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_is_target = bridge::XlaToAtenTensor(is_target); | |
auto&& x_result = at::_thnn_multilabel_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction, r_is_target); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::_thnn_multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_is_target = bridge::XlaToAtenTensor(is_target); | |
auto&& x_result = at::_thnn_multilabel_margin_loss_backward(r_grad_output, r_self, r_target, reduction, r_is_target); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_total_weight = bridge::XlaToAtenMutableTensor(total_weight); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::_thnn_nll_loss_forward_out(w_output, w_total_weight, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, total_weight); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::_thnn_nll_loss_forward(r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::_thnn_nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::_thnn_nll_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::_thnn_nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::_thnn_nll_loss_backward(r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_total_weight = bridge::XlaToAtenMutableTensor(total_weight); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::_thnn_nll_loss2d_forward_out(w_output, w_total_weight, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, total_weight); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_nll_loss2d_forward(r_self, r_target, r_weight, reduction, ignore_index);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_total_weight = bridge::XlaToAtenTensor(total_weight);
  auto&& x_result = at::_thnn_nll_loss2d_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_total_weight = bridge::XlaToAtenTensor(total_weight);
  auto&& x_result = at::_thnn_nll_loss2d_backward(r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_smooth_l1_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_smooth_l1_loss_forward_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_smooth_l1_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_smooth_l1_loss_forward(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_smooth_l1_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_smooth_l1_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_soft_margin_loss_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_soft_margin_loss_forward_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_soft_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_soft_margin_loss_forward(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_soft_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::_thnn_soft_margin_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_elu_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_elu_forward_out(w_output, r_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_elu_forward(const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_elu_forward(r_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::_thnn_elu_backward_out(w_grad_input, r_grad_output, alpha, scale, input_scale, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_elu_backward(const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::_thnn_elu_backward(r_grad_output, alpha, scale, input_scale, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output));
}
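// _thnn_elu_backward takes no `self` argument, so the result is created on
// the device of `output`; each backward wrapper anchors on whichever tensor
// argument is available.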
at::Tensor & XLATypeBase::_thnn_elu_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_elu_(w_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_thnn_elu_forward_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_elu_forward_(w_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
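// In-place variants (trailing underscore) mutate `self` through the mutable
// bridge and return the XLA-side `self` reference directly; no new XLA
// tensor is created for the result.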
at::Tensor & XLATypeBase::_thnn_glu_forward_out(at::Tensor & output, const at::Tensor & self, int64_t dim) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_glu_forward_out(w_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_glu_forward(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_glu_forward(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_glu_backward_out(w_grad_input, r_grad_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_glu_backward(r_grad_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_hardtanh_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_hardtanh_forward_out(w_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_hardtanh_forward(const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_hardtanh_forward(r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_hardtanh_backward_out(w_grad_input, r_grad_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_hardtanh_backward(r_grad_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_hardtanh_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_hardtanh_(w_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_thnn_hardtanh_forward_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_hardtanh_forward_(w_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_thnn_leaky_relu_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_forward_out(w_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_leaky_relu_forward(const at::Tensor & self, at::Scalar negative_slope) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_forward(r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_backward_out(w_grad_input, r_grad_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_backward(r_grad_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_leaky_relu_(at::Tensor & self, at::Scalar negative_slope) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_(w_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_thnn_leaky_relu_forward_(at::Tensor & self, at::Scalar negative_slope) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::_thnn_leaky_relu_forward_(w_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_buffer = bridge::XlaToAtenMutableTensor(buffer);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_log_sigmoid_forward_out(w_output, w_buffer, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, buffer);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_log_sigmoid_forward(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_log_sigmoid_forward(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
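// Multi-result ops wrap each element of the returned tuple into its own XLA
// tensor via std::get<N>(x_result), all placed on self's device.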
at::Tensor & XLATypeBase::_thnn_log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_buffer = bridge::XlaToAtenTensor(buffer);
  auto&& x_result = at::_thnn_log_sigmoid_backward_out(w_grad_input, r_grad_output, r_self, r_buffer);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_buffer = bridge::XlaToAtenTensor(buffer);
  auto&& x_result = at::_thnn_log_sigmoid_backward(r_grad_output, r_self, r_buffer);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_rrelu_with_noise_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_forward_out(w_output, r_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_rrelu_with_noise_forward(const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_forward(r_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_rrelu_with_noise_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_backward_out(w_grad_input, r_grad_output, r_self, r_noise, lower, upper, training);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_backward(r_grad_output, r_self, r_noise, lower, upper, training);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_(w_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_thnn_rrelu_with_noise_forward_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::_thnn_rrelu_with_noise_forward_(w_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
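// The rrelu_with_noise wrappers pass the at::Generator pointer straight
// through to the ATen kernel, so the randomness comes from the generator
// supplied by the caller rather than from anything XLA-specific.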
at::Tensor & XLATypeBase::_thnn_softplus_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softplus_forward_out(w_output, r_self, beta, threshold);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_softplus_forward(const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softplus_forward(r_self, beta, threshold);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::_thnn_softplus_backward_out(w_grad_input, r_grad_output, r_self, beta, threshold, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::_thnn_softplus_backward(r_grad_output, r_self, beta, threshold, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_softshrink_forward_out(at::Tensor & output, const at::Tensor & self, at::Scalar lambd) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softshrink_forward_out(w_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_softshrink_forward(const at::Tensor & self, at::Scalar lambd) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softshrink_forward(r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softshrink_backward_out(w_grad_input, r_grad_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_softshrink_backward(r_grad_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_adaptive_avg_pool3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_avg_pool3d_forward_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_adaptive_avg_pool3d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_avg_pool3d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_avg_pool3d_backward_out(w_grad_input, r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_avg_pool3d_backward(r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_adaptive_max_pool2d_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_max_pool2d_forward_out(w_output, w_indices, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_adaptive_max_pool2d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_max_pool2d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_adaptive_max_pool2d_backward_out(w_grad_input, r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_adaptive_max_pool2d_backward(r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_adaptive_max_pool3d_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_max_pool3d_forward_out(w_output, w_indices, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_adaptive_max_pool3d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_adaptive_max_pool3d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_adaptive_max_pool3d_backward_out(w_grad_input, r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_adaptive_max_pool3d_backward(r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_avg_pool2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool2d_forward_out(w_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_avg_pool2d_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool2d_forward(r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool2d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool2d_backward(r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_avg_pool3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool3d_forward_out(w_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_avg_pool3d_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool3d_forward(r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool3d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_avg_pool3d_backward(r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_max_pool2d_with_indices_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_max_pool2d_with_indices_forward_out(w_output, w_indices, r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_max_pool2d_with_indices_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_max_pool2d_with_indices_forward(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_pool2d_with_indices_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_pool2d_with_indices_backward(r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_max_pool3d_with_indices_forward_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_max_pool3d_with_indices_forward_out(w_output, w_indices, r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_max_pool3d_with_indices_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_max_pool3d_with_indices_forward(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_pool3d_with_indices_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_pool3d_with_indices_backward(r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_max_unpool2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool2d_forward_out(w_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_max_unpool2d_forward(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool2d_forward(r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_max_unpool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool2d_backward_out(w_grad_input, r_grad_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_max_unpool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool2d_backward(r_grad_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_max_unpool3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool3d_forward_out(w_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_max_unpool3d_forward(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool3d_forward(r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_max_unpool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool3d_backward_out(w_grad_input, r_grad_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_max_unpool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::_thnn_max_unpool3d_backward(r_grad_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_linear1d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_linear1d_forward_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_linear1d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_linear1d_forward(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_linear1d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_linear1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_linear1d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
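// The upsample backward wrappers receive no `self`, so the gradient result
// is created on grad_output's device instead.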
at::Tensor & XLATypeBase::_thnn_upsample_bilinear2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_bilinear2d_forward_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_bilinear2d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_bilinear2d_forward(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_bilinear2d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_bilinear2d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_upsample_bicubic2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_bicubic2d_forward_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_bicubic2d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_bicubic2d_forward(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_bicubic2d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_bicubic2d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_upsample_trilinear3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_trilinear3d_forward_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_trilinear3d_forward(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_trilinear3d_forward(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_trilinear3d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_trilinear3d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest1d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest1d_forward_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest1d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest1d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest1d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest1d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest2d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest2d_forward_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest2d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest2d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest2d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest2d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest3d_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest3d_forward_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest3d_forward(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_upsample_nearest3d_forward(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest3d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_upsample_nearest3d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_sigmoid_forward_out(at::Tensor & output, const at::Tensor & self) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_thnn_sigmoid_forward_out(w_output, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::_thnn_sigmoid_forward(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_thnn_sigmoid_forward(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_thnn_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_output = bridge::XlaToAtenTensor(output); | |
auto&& x_result = at::_thnn_sigmoid_backward_out(w_grad_input, r_grad_output, r_output); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::_thnn_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_output = bridge::XlaToAtenTensor(output); | |
auto&& x_result = at::_thnn_sigmoid_backward(r_grad_output, r_output); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output)); | |
} | |
at::Tensor & XLATypeBase::_thnn_tanh_forward_out(at::Tensor & output, const at::Tensor & self) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_thnn_tanh_forward_out(w_output, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::_thnn_tanh_forward(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_thnn_tanh_forward(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_thnn_tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_output = bridge::XlaToAtenTensor(output); | |
auto&& x_result = at::_thnn_tanh_backward_out(w_grad_input, r_grad_output, r_output); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::_thnn_tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_output = bridge::XlaToAtenTensor(output); | |
auto&& x_result = at::_thnn_tanh_backward(r_grad_output, r_output); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output)); | |
} | |
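// The THNN convolution wrappers below return three-element tuples. The _out
// variants hand the caller-provided XLA references straight back, while the
// functional variants rewrap each tuple element individually via std::get<i>,
// placing all outputs on the device of `self`.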
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_transpose2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_columns = bridge::XlaToAtenMutableTensor(columns);
  auto w_ones = bridge::XlaToAtenMutableTensor(ones);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_transpose2d_forward_out(w_output, w_columns, w_ones, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, columns, ones);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_transpose2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_transpose2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_transpose2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_transpose2d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_columns, r_ones);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_transpose2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_transpose2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_columns, r_ones, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_transpose3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_transpose3d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_transpose3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_transpose3d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_transpose3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv_transpose3d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_transpose3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv_transpose3d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv2d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv2d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv2d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::_thnn_conv_depthwise2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_depthwise2d_forward_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_conv_depthwise2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_depthwise2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_depthwise2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_conv_depthwise2d_backward_out(w_grad_input, w_grad_weight, r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(grad_input, grad_weight);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_depthwise2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, std::array<bool,2> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::_thnn_conv_depthwise2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv3d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv3d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv3d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::_thnn_conv3d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_dilated2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_columns = bridge::XlaToAtenMutableTensor(columns);
  auto w_ones = bridge::XlaToAtenMutableTensor(ones);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_dilated2d_forward_out(w_output, w_columns, w_ones, r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, columns, ones);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_dilated2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_dilated2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_dilated2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_dilated2d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, r_columns, r_ones);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_dilated2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_dilated2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, r_columns, r_ones, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_dilated3d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_columns = bridge::XlaToAtenMutableTensor(columns);
  auto w_ones = bridge::XlaToAtenMutableTensor(ones);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_dilated3d_forward_out(w_output, w_columns, w_ones, r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, columns, ones);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_dilated3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_thnn_conv_dilated3d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::_thnn_conv_dilated3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_dilated3d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, r_columns, r_ones);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_conv_dilated3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::_thnn_conv_dilated3d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, r_columns, r_ones, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
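// _thnn_col2im/_thnn_im2col are the fold/unfold primitives used to lower
// convolutions to matrix multiplies; their wrappers follow the same
// unwrap/dispatch/rewrap pattern as the unary ops above.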
at::Tensor & XLATypeBase::_thnn_col2im_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_col2im_forward_out(w_output, r_self, output_size, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_col2im_forward(const at::Tensor & self, at::IntList output_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_col2im_forward(r_self, output_size, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_col2im_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_col2im_backward_out(w_grad_input, r_grad_output, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_col2im_backward(const at::Tensor & grad_output, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_col2im_backward(r_grad_output, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::_thnn_im2col_forward_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_im2col_forward_out(w_output, r_self, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::_thnn_im2col_forward(const at::Tensor & self, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_thnn_im2col_forward(r_self, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_thnn_im2col_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList input_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_im2col_backward_out(w_grad_input, r_grad_output, input_size, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::_thnn_im2col_backward(const at::Tensor & grad_output, at::IntList input_size, at::IntList kernel_size, at::IntList dilation, at::IntList padding, at::IntList stride) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::_thnn_im2col_backward(r_grad_output, input_size, kernel_size, dilation, padding, stride);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
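// _cast_<ScalarType> wrappers: one per ATen scalar type, all sharing the same
// body modulo the dispatched at::_cast_* function.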
at::Tensor XLATypeBase::_cast_Byte(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Byte(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Char(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Char(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Double(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Double(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Float(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Float(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Int(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Int(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Long(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Long(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Short(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Short(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_cast_Half(const at::Tensor & self, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_cast_Half(r_self, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_fused_dropout(const at::Tensor & self, double p, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_fused_dropout(r_self, p, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::_masked_scale(const at::Tensor & self, const at::Tensor & mask, double scale) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mask = bridge::XlaToAtenTensor(mask);
  auto&& x_result = at::_masked_scale(r_self, r_mask, scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_shape = bridge::XlaToAtenTensor(shape);
  auto&& x_result = at::_reshape_from_tensor(r_self, r_shape);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_shape_as_tensor(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_shape_as_tensor(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
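// Dropout family: each op comes in a functional variant (rewrapped result) and
// an in-place `_` variant that unwraps `self` mutably and returns it directly.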
at::Tensor XLATypeBase::dropout(const at::Tensor & input, double p, bool train) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::dropout(r_input, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(input));
}
at::Tensor & XLATypeBase::dropout_(at::Tensor & self, double p, bool train) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::dropout_(w_self, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::feature_dropout(const at::Tensor & input, double p, bool train) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::feature_dropout(r_input, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(input));
}
at::Tensor & XLATypeBase::feature_dropout_(at::Tensor & self, double p, bool train) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::feature_dropout_(w_self, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::alpha_dropout(const at::Tensor & input, double p, bool train) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::alpha_dropout(r_input, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(input));
}
at::Tensor & XLATypeBase::alpha_dropout_(at::Tensor & self, double p, bool train) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::alpha_dropout_(w_self, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::feature_alpha_dropout(const at::Tensor & input, double p, bool train) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::feature_alpha_dropout(r_input, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(input));
}
at::Tensor & XLATypeBase::feature_alpha_dropout_(at::Tensor & self, double p, bool train) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::feature_alpha_dropout_(w_self, p, train);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
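// Element-wise math ops typically appear as a triad: functional, in-place `_`,
// and `_out` into a caller-provided result tensor.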
at::Tensor XLATypeBase::abs(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::abs(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::abs_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::abs_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::abs_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::abs_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::acos(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::acos(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::acos_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::acos_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::acos_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::acos_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::avg_pool1d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool1d(r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::adaptive_avg_pool1d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool1d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::adaptive_max_pool1d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_max_pool1d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::add(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::add(r_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
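// Note: this in-place variant dispatches to at::native::add_ directly instead
// of at::add_; the same holds for the scalar add_ and addr_ overloads below.
// (The reason is not stated in this file; presumably it avoids re-dispatching
// back into this type through the registry.)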
at::Tensor & XLATypeBase::add_(at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::add_(w_self, r_other, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::add_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::add_out(w_result, r_self, r_other, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::add(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::add(r_self, other, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::add_(at::Tensor & self, at::Scalar other, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::add_(w_self, other, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::addmv(r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::addmv_(w_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::addmv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mat = bridge::XlaToAtenTensor(mat); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::addmv_out(w_result, r_self, r_mat, r_vec, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::addr(r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::addr_(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::native::addr_(w_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::addr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec1 = bridge::XlaToAtenTensor(vec1); | |
auto r_vec2 = bridge::XlaToAtenTensor(vec2); | |
auto&& x_result = at::addr_out(w_result, r_self, r_vec1, r_vec2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::affine_grid_generator(const at::Tensor & theta, at::IntList size) const {
  auto r_theta = bridge::XlaToAtenTensor(theta);
  auto&& x_result = at::affine_grid_generator(r_theta, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(theta));
}
at::Tensor XLATypeBase::affine_grid_generator_backward(const at::Tensor & grad, at::IntList size) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto&& x_result = at::affine_grid_generator_backward(r_grad, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad));
}
at::Tensor XLATypeBase::all(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::all(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::all_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::all_out(w_result, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
bool XLATypeBase::allclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::allclose(r_self, r_other, rtol, atol, equal_nan);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
at::Tensor XLATypeBase::any(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::any(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::any_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::any_out(w_result, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
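// NOTE (editorial): predicates such as allclose above return a plain C++
// value (bool) rather than a tensor, so there is nothing to rewrap with
// bridge::CreateXlaTensor and the raw x_result is returned directly. On an
// accelerator backend this presumably forces the value to materialize on the
// host; that is an inference about the bridge, not something stated here.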
at::Tensor & XLATypeBase::arange_out(at::Tensor & result, at::Scalar end) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::arange_out(w_result, end);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::arange_out(at::Tensor & result, at::Scalar start, at::Scalar end, at::Scalar step) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::arange_out(w_result, start, end, step);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_dim_arange(const at::Tensor & like, int64_t dim) const {
  auto r_like = bridge::XlaToAtenTensor(like);
  auto&& x_result = at::_dim_arange(r_like, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(like));
}
at::Tensor XLATypeBase::argmax(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::argmax(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::argmax(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::argmax(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_argmax(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_argmax(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::argmin(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::argmin(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::argmin(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::argmin(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_argmin(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_argmin(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::as_strided(const at::Tensor & self, at::IntList size, at::IntList stride, c10::optional<int64_t> storage_offset) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::as_strided(r_self, size, stride, storage_offset);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::as_strided_(at::Tensor & self, at::IntList size, at::IntList stride, c10::optional<int64_t> storage_offset) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::as_strided_(w_self, size, stride, storage_offset);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
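// NOTE (editorial): the arange_out overloads above take no input tensor at
// all; the only tensor involved is the mutable `result`, so the wrapper just
// unwraps it, calls at::arange_out, and returns the caller's `result`
// reference. No device inference is needed because no new XLA tensor is
// created on the way out.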
at::Tensor XLATypeBase::asin(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::asin(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::asin_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::asin_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::asin_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::asin_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::atan(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::atan(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::atan_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::atan_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::atan_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::atan_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::baddbmm(r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::detail::infer_type(w_self).baddbmm_(w_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::_baddbmm_mkl_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::_baddbmm_mkl_(w_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::baddbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_batch1 = bridge::XlaToAtenTensor(batch1);
  auto r_batch2 = bridge::XlaToAtenTensor(batch2);
  auto&& x_result = at::baddbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
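// NOTE (editorial): baddbmm_ above (and a few other in-place wrappers in
// this file) dispatches through at::detail::infer_type(w_self) rather than a
// free at:: function. The apparent intent is to re-dispatch on the unwrapped
// tensor's own type so the plain ATen implementation runs; that reading is
// inferred from the call shape, not documented in this generated file.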
at::Tensor XLATypeBase::batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto&& x_result = at::batch_norm(r_input, r_weight, r_bias, r_running_mean, r_running_var, training, momentum, eps, cudnn_enabled);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(running_var));
}
at::Tensor XLATypeBase::bernoulli(const at::Tensor & self, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::bernoulli(r_self, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::bernoulli_out(at::Tensor & result, const at::Tensor & self, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::bernoulli_out(w_result, r_self, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::bernoulli_(at::Tensor & self, const at::Tensor & p, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_p = bridge::XlaToAtenTensor(p);
  auto&& x_result = at::detail::infer_type(w_self).bernoulli_(w_self, r_p, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::bernoulli_(at::Tensor & self, double p, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::detail::infer_type(w_self).bernoulli_(w_self, p, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::bernoulli(const at::Tensor & self, double p, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::bernoulli(r_self, p, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
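// NOTE (editorial): the random ops above forward the at::Generator* argument
// untouched. Nothing in this file converts the generator to an XLA-side
// equivalent, so randomness is evidently produced by the underlying ATen
// implementation operating on the unwrapped tensors, with the sampled result
// rewrapped afterwards like any other output.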
at::Tensor XLATypeBase::bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const at::Tensor & bias) const {
  auto r_input1 = bridge::XlaToAtenTensor(input1);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::bilinear(r_input1, r_input2, r_weight, r_bias);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, const at::Tensor & pos_weight, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_pos_weight = bridge::XlaToAtenTensor(pos_weight);
  auto&& x_result = at::binary_cross_entropy_with_logits(r_self, r_target, r_weight, r_pos_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::binary_cross_entropy_with_logits_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, const at::Tensor & pos_weight, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_pos_weight = bridge::XlaToAtenTensor(pos_weight);
  auto&& x_result = at::binary_cross_entropy_with_logits_backward(r_grad_output, r_self, r_target, r_weight, r_pos_weight, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::bincount(const at::Tensor & self, const at::Tensor & weights, int64_t minlength) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weights = bridge::XlaToAtenTensor(weights);
  auto&& x_result = at::bincount(r_self, r_weights, minlength);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::bmm(const at::Tensor & self, const at::Tensor & mat2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::bmm(r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::bmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::bmm_out(w_result, r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
std::vector<at::Tensor> XLATypeBase::broadcast_tensors(at::TensorList tensors) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::broadcast_tensors(l_tensors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(tensors));
}
at::Tensor XLATypeBase::cat(at::TensorList tensors, int64_t dim) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::cat(l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(tensors));
}
at::Tensor & XLATypeBase::cat_out(at::Tensor & result, at::TensorList tensors, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::cat_out(w_result, l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
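// NOTE (editorial): list-taking ops (broadcast_tensors, cat, cat_out above)
// use bridge::XlaCreateTensorList to unwrap a whole at::TensorList at once,
// and the bridge::XlaTensorDevice(tensors) overload to pick the device for
// the rewrapped output; presumably that overload inspects the list's
// elements, though this is an assumption about the bridge helper, which is
// declared elsewhere.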
at::Tensor XLATypeBase::ceil(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::ceil(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::ceil_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::ceil_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::ceil_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::ceil_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::chain_matmul(at::TensorList matrices) const {
  auto l_matrices = bridge::XlaCreateTensorList(matrices);
  auto&& x_result = at::chain_matmul(l_matrices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(matrices));
}
std::vector<at::Tensor> XLATypeBase::chunk(const at::Tensor & self, int64_t chunks, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::chunk(r_self, chunks, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::clamp(const at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp(r_self, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::clamp_(at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::clamp_(w_self, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::clamp_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> min, c10::optional<at::Scalar> max) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp_out(w_result, r_self, min, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::clamp_max(const at::Tensor & self, at::Scalar max) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp_max(r_self, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::clamp_max_(at::Tensor & self, at::Scalar max) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::clamp_max_(w_self, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::clamp_max_out(at::Tensor & result, const at::Tensor & self, at::Scalar max) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp_max_out(w_result, r_self, max);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::clamp_min(const at::Tensor & self, at::Scalar min) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp_min(r_self, min);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::clamp_min_(at::Tensor & self, at::Scalar min) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::clamp_min_(w_self, min);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::clamp_min_out(at::Tensor & result, const at::Tensor & self, at::Scalar min) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clamp_min_out(w_result, r_self, min);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::constant_pad_nd(const at::Tensor & self, at::IntList pad, at::Scalar value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::constant_pad_nd(r_self, pad, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::contiguous(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::contiguous(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::convolution(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::convolution(r_input, r_weight, r_bias, stride, padding, dilation, transposed, output_padding, groups);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::_convolution(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_convolution(r_input, r_weight, r_bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::_convolution_nogroup(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::_convolution_nogroup(r_input, r_weight, r_bias, stride, padding, dilation, transposed, output_padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_convolution_double_backward(const at::Tensor & ggI, const at::Tensor & ggW, const at::Tensor & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntList stride, at::IntList padding, at::IntList dilation, bool transposed, at::IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) const {
  auto r_ggI = bridge::XlaToAtenTensor(ggI);
  auto r_ggW = bridge::XlaToAtenTensor(ggW);
  auto r_ggb = bridge::XlaToAtenTensor(ggb);
  auto r_gO = bridge::XlaToAtenTensor(gO);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_convolution_double_backward(r_ggI, r_ggW, r_ggb, r_gO, r_weight, r_self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
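// NOTE (editorial): multi-result ops such as _convolution_double_backward
// rewrap each std::tuple element with bridge::CreateXlaTensor individually,
// all pinned to one device (here derived from `self`). A hedged sketch of
// consuming such a wrapper (variable names hypothetical):
//
//   at::Tensor gI, gW, gb;
//   std::tie(gI, gW, gb) = xla_type._convolution_double_backward(/*...*/);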
at::Tensor XLATypeBase::conv1d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv1d(r_input, r_weight, r_bias, stride, padding, dilation, groups);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::conv2d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv2d(r_input, r_weight, r_bias, stride, padding, dilation, groups);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::conv3d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation, int64_t groups) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv3d(r_input, r_weight, r_bias, stride, padding, dilation, groups);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv_tbc(r_self, r_weight, r_bias, pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv_tbc_backward(r_self, r_input, r_weight, r_bias, pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv_transpose1d(r_input, r_weight, r_bias, stride, padding, output_padding, groups, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv_transpose2d(r_input, r_weight, r_bias, stride, padding, output_padding, groups, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, int64_t groups, at::IntList dilation) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::conv_transpose3d(r_input, r_weight, r_bias, stride, padding, output_padding, groups, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor & XLATypeBase::s_copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_src = bridge::XlaToAtenTensor(src);
  auto&& x_result = at::s_copy_(w_self, r_src, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_s_copy_from(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_dst = bridge::XlaToAtenTensor(dst);
  auto&& x_result = at::_s_copy_from(r_self, r_dst, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
void XLATypeBase::_copy_same_type_(at::Tensor & self, const at::Tensor & src) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_src = bridge::XlaToAtenTensor(src);
  at::_copy_same_type_(w_self, r_src);
}
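// NOTE (editorial): _copy_same_type_ above is the one wrapper shape with no
// return value: it unwraps both sides and calls at::_copy_same_type_ purely
// for its side effect, so neither the x_result temporary nor the rewrapping
// step used elsewhere in this file appears.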
at::Tensor XLATypeBase::cos(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cos(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::cos_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::cos_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::cos_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cos_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::cosh(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cosh(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::cosh_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::cosh_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::cosh_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cosh_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) const {
  auto r_input1 = bridge::XlaToAtenTensor(input1);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::cosine_embedding_loss(r_input1, r_input2, r_target, margin, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(target));
}
at::Tensor XLATypeBase::cumsum(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumsum(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::cumsum(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumsum(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumsum_out(w_result, r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::cumsum_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumsum_out(w_result, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::cumprod(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumprod(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::cumprod(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumprod(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumprod_out(w_result, r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::cumprod_out(at::Tensor & result, const at::Tensor & self, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::cumprod_out(w_result, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, int64_t blank, int64_t reduction) const {
  auto r_log_probs = bridge::XlaToAtenTensor(log_probs);
  auto r_targets = bridge::XlaToAtenTensor(targets);
  auto&& x_result = at::ctc_loss(r_log_probs, r_targets, input_lengths, target_lengths, blank, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(targets));
}
at::Tensor XLATypeBase::ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction) const {
  auto r_log_probs = bridge::XlaToAtenTensor(log_probs);
  auto r_targets = bridge::XlaToAtenTensor(targets);
  auto r_input_lengths = bridge::XlaToAtenTensor(input_lengths);
  auto r_target_lengths = bridge::XlaToAtenTensor(target_lengths);
  auto&& x_result = at::ctc_loss(r_log_probs, r_targets, r_input_lengths, r_target_lengths, blank, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(target_lengths));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, int64_t blank) const {
  auto r_log_probs = bridge::XlaToAtenTensor(log_probs);
  auto r_targets = bridge::XlaToAtenTensor(targets);
  auto&& x_result = at::_ctc_loss(r_log_probs, r_targets, input_lengths, target_lengths, blank);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(targets)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(targets)));
}
at::Tensor XLATypeBase::_ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntList input_lengths, at::IntList target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_log_probs = bridge::XlaToAtenTensor(log_probs);
  auto r_targets = bridge::XlaToAtenTensor(targets);
  auto r_neg_log_likelihood = bridge::XlaToAtenTensor(neg_log_likelihood);
  auto r_log_alpha = bridge::XlaToAtenTensor(log_alpha);
  auto&& x_result = at::_ctc_loss_backward(r_grad, r_log_probs, r_targets, input_lengths, target_lengths, r_neg_log_likelihood, r_log_alpha, blank);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(log_alpha));
}
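// NOTE (editorial): ctc_loss is generated twice because ATen exposes two
// overloads: one takes the input/target lengths as at::IntList (forwarded
// verbatim) and one takes them as tensors (unwrapped like any other tensor
// argument). In both cases the result device tracks the last tensor
// parameter of the signature, matching the generator's convention seen
// throughout this file.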
at::Tensor XLATypeBase::det(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::det(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::diag_embed(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::diag_embed(r_self, offset, dim1, dim2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::diagflat(const at::Tensor & self, int64_t offset) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::diagflat(r_self, offset);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::diagonal(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::diagonal(r_self, offset, dim1, dim2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::div(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::div(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::div_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::div_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::div_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::div_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::div(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::div(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::div_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::div_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::dot(const at::Tensor & self, const at::Tensor & tensor) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor = bridge::XlaToAtenTensor(tensor);
  auto&& x_result = at::dot(r_self, r_tensor);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::dot_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_tensor = bridge::XlaToAtenTensor(tensor);
  auto&& x_result = at::dot_out(w_result, r_self, r_tensor);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::einsum(std::string equation, at::TensorList tensors) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::einsum(equation, l_tensors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(tensors));
}
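// NOTE (editorial): einsum combines both argument styles seen in this file:
// the equation string is forwarded as-is while the operand list goes through
// bridge::XlaCreateTensorList. A hedged, hypothetical call for illustration:
//
//   auto y = xla_type.einsum("ij,jk->ik", {a, b});  // a, b on an XLA device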
at::Tensor XLATypeBase::embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const {
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::embedding(r_weight, r_indices, padding_idx, scale_grad_by_freq, sparse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(indices));
}
at::Tensor XLATypeBase::embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::embedding_backward(r_grad, r_indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(indices));
}
at::Tensor XLATypeBase::embedding_dense_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::embedding_dense_backward(r_grad, r_indices, num_weights, padding_idx, scale_grad_by_freq);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(indices));
}
at::Tensor & XLATypeBase::embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::embedding_renorm_(w_self, r_indices, max_norm, norm_type);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::embedding_sparse_backward(r_grad, r_indices, num_weights, padding_idx, scale_grad_by_freq);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(indices));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> XLATypeBase::embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const {
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_offsets = bridge::XlaToAtenTensor(offsets);
  auto&& x_result = at::embedding_bag(r_weight, r_indices, r_offsets, scale_grad_by_freq, mode, sparse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<3>(x_result), bridge::XlaTensorDevice(offsets)));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const {
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_offsets = bridge::XlaToAtenTensor(offsets);
  auto&& x_result = at::_embedding_bag(r_weight, r_indices, r_offsets, scale_grad_by_freq, mode, sparse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(offsets)), bridge::CreateXlaTensor(std::get<3>(x_result), bridge::XlaTensorDevice(offsets)));
}
at::Tensor XLATypeBase::_embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_offsets = bridge::XlaToAtenTensor(offsets);
  auto r_offset2bag = bridge::XlaToAtenTensor(offset2bag);
  auto r_bag_size = bridge::XlaToAtenTensor(bag_size);
  auto r_maximum_indices = bridge::XlaToAtenTensor(maximum_indices);
  auto&& x_result = at::_embedding_bag_backward(r_grad, r_indices, r_offsets, r_offset2bag, r_bag_size, r_maximum_indices, num_weights, scale_grad_by_freq, mode, sparse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(maximum_indices));
}
at::Tensor XLATypeBase::_embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_offsets = bridge::XlaToAtenTensor(offsets);
  auto r_offset2bag = bridge::XlaToAtenTensor(offset2bag);
  auto r_bag_size = bridge::XlaToAtenTensor(bag_size);
  auto&& x_result = at::_embedding_bag_sparse_backward(r_grad, r_indices, r_offsets, r_offset2bag, r_bag_size, num_weights, scale_grad_by_freq, mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bag_size));
}
at::Tensor XLATypeBase::_embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_offsets = bridge::XlaToAtenTensor(offsets);
  auto r_offset2bag = bridge::XlaToAtenTensor(offset2bag);
  auto r_bag_size = bridge::XlaToAtenTensor(bag_size);
  auto r_maximum_indices = bridge::XlaToAtenTensor(maximum_indices);
  auto&& x_result = at::_embedding_bag_dense_backward(r_grad, r_indices, r_offsets, r_offset2bag, r_bag_size, r_maximum_indices, num_weights, scale_grad_by_freq, mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(maximum_indices));
}
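// NOTE (editorial): embedding_bag and _embedding_bag return four tensors; as
// with the other tuple-returning cases, each element is rewrapped with
// bridge::CreateXlaTensor separately, all on the device inferred from
// `offsets` (again the last tensor argument in the signature).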
at::Tensor XLATypeBase::empty(at::IntList size, const at::TensorOptions & options) const {
  auto&& x_result = at::empty(size, options);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(options));
}
at::Tensor & XLATypeBase::resize_(at::Tensor & self, at::IntList size) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::detail::infer_type(w_self).resize_(w_self, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::empty_out(at::Tensor & result, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::empty_out(w_result, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::empty_like(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::empty_like(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::empty_strided(at::IntList size, at::IntList stride, const at::TensorOptions & options) const {
  auto&& x_result = at::empty_strided(size, stride, options);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(options));
}
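// NOTE (editorial): true factory functions (empty, empty_strided above) have
// no tensor inputs, so the target device comes from the
// bridge::XlaTensorDevice(options) overload, i.e. from the at::TensorOptions
// the caller supplied. A minimal hedged usage sketch (device choice
// hypothetical, matching the backend mapping used elsewhere in this file):
//
//   auto t = xla_type.empty(
//       {2, 3}, at::TensorOptions().device(at::DeviceType::HIP));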
at::Tensor XLATypeBase::erf(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::erf(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::erf_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::erf_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::erf_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::erf_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::erfc(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::erfc(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::erfc_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::erfc_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::erfc_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::erfc_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::exp(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::exp(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::exp_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::exp_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::exp_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::exp_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::expm1(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::expm1(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::expm1_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::expm1_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::expm1_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::expm1_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::expand(const at::Tensor & self, at::IntList size, bool implicit) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::expand(r_self, size, implicit);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::expand_as(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::expand_as(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
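// NOTE (editorial): expand and expand_as (like contiguous earlier) call into
// at::native:: directly rather than the dispatched at:: entry points,
// presumably to avoid re-entering the type dispatcher for pure view ops;
// that rationale is inferred, not stated. The result is still rewrapped as a
// fresh XLA tensor on self's device.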
at::Tensor & XLATypeBase::eye_out(at::Tensor & result, int64_t n) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::eye_out(w_result, n);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::eye_out(at::Tensor & result, int64_t n, int64_t m) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::eye_out(w_result, n, m);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::flatten(r_self, start_dim, end_dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::fill_(at::Tensor & self, at::Scalar value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::fill_(w_self, value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::fill_(at::Tensor & self, const at::Tensor & value) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_value = bridge::XlaToAtenTensor(value);
  auto&& x_result = at::fill_(w_self, r_value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::floor(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::floor(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::floor_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::floor_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::floor_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::floor_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::full_out(at::Tensor & result, at::IntList size, at::Scalar fill_value) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::full_out(w_result, size, fill_value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::full_like(const at::Tensor & self, at::Scalar fill_value) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::full_like(r_self, fill_value);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_grid = bridge::XlaToAtenTensor(grid); | |
auto&& x_result = at::grid_sampler(r_input, r_grid, interpolation_mode, padding_mode); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grid)); | |
} | |
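// For multi-tensor ops the generator picks one tensor argument to supply
// the device for the rewrapped result; grid_sampler uses `grid` rather
// than `input` (group_norm below similarly uses `bias`). This presumes
// all tensor arguments live on the same XLA device, e.g. (names
// illustrative):
//   auto out = type.grid_sampler(inp, grd, 0, 0); // out lands on grd's device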
at::Tensor XLATypeBase::grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_grid = bridge::XlaToAtenTensor(grid);
  auto&& x_result = at::grid_sampler_2d(r_input, r_grid, interpolation_mode, padding_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grid));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_grid = bridge::XlaToAtenTensor(grid);
  auto&& x_result = at::grid_sampler_2d_backward(r_grad_output, r_input, r_grid, interpolation_mode, padding_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(grid)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(grid)));
}
at::Tensor XLATypeBase::grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_grid = bridge::XlaToAtenTensor(grid);
  auto&& x_result = at::grid_sampler_3d(r_input, r_grid, interpolation_mode, padding_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grid));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_grid = bridge::XlaToAtenTensor(grid);
  auto&& x_result = at::grid_sampler_3d_backward(r_grad_output, r_input, r_grid, interpolation_mode, padding_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(grid)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(grid)));
}
at::Tensor XLATypeBase::hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::hinge_embedding_loss(r_self, r_target, margin, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::ger(const at::Tensor & self, const at::Tensor & vec2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec2 = bridge::XlaToAtenTensor(vec2);
  auto&& x_result = at::ger(r_self, r_vec2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::ger_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_vec2 = bridge::XlaToAtenTensor(vec2);
  auto&& x_result = at::ger_out(w_result, r_self, r_vec2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::gesv(const at::Tensor & self, const at::Tensor & A) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::gesv(r_self, r_A);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::gesv_out(at::Tensor & solution, at::Tensor & lu, const at::Tensor & self, const at::Tensor & A) const {
  auto w_solution = bridge::XlaToAtenMutableTensor(solution);
  auto w_lu = bridge::XlaToAtenMutableTensor(lu);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::gesv_out(w_solution, w_lu, r_self, r_A);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(solution, lu);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_gesv_helper(const at::Tensor & self, const at::Tensor & A) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_A = bridge::XlaToAtenTensor(A);
  auto&& x_result = at::_gesv_helper(r_self, r_A);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::group_norm(const at::Tensor & input, int64_t num_groups, const at::Tensor & weight, const at::Tensor & bias, double eps, bool cudnn_enabled) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::group_norm(r_input, num_groups, r_weight, r_bias, eps, cudnn_enabled);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::fft(const at::Tensor & self, int64_t signal_ndim, bool normalized) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::fft(r_self, signal_ndim, normalized);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::ifft(const at::Tensor & self, int64_t signal_ndim, bool normalized) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::ifft(r_self, signal_ndim, normalized);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::rfft(const at::Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rfft(r_self, signal_ndim, normalized, onesided);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::irfft(const at::Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, at::IntList signal_sizes) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::irfft(r_self, signal_ndim, normalized, onesided, signal_sizes);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_fft_with_size(const at::Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, at::IntList checked_signal_sizes, bool normalized, bool onesided, at::IntList output_sizes) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_fft_with_size(r_self, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, output_sizes);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
void XLATypeBase::_cufft_set_plan_cache_max_size(int64_t max_size) const {
  at::_cufft_set_plan_cache_max_size(max_size);
}
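// Ops that take no tensors, like the cuFFT plan-cache setter above, are
// forwarded verbatim: there is nothing to unwrap or rewrap and no value
// to return. Illustrative:
//   type._cufft_set_plan_cache_max_size(64); // global-state call only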
at::Tensor XLATypeBase::index(const at::Tensor & self, at::TensorList indices) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto l_indices = bridge::XlaCreateTensorList(indices);
  auto&& x_result = at::index(r_self, l_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
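// at::TensorList arguments are converted element-wise through
// bridge::XlaCreateTensorList before the call. A sketch, assuming XLA
// tensors `t`, `i0`, `i1` (placeholder names):
//   std::vector<at::Tensor> idx = {i0, i1};
//   auto picked = type.index(t, idx); // advanced indexing on CPU, rewrapped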
at::Tensor & XLATypeBase::index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_index = bridge::XlaToAtenTensor(index);
  auto r_source = bridge::XlaToAtenTensor(source);
  auto&& x_result = at::native::index_copy_(w_self, dim, r_index, r_source);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::index_put(const at::Tensor & self, at::TensorList indices, const at::Tensor & values, bool accumulate) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto l_indices = bridge::XlaCreateTensorList(indices);
  auto r_values = bridge::XlaToAtenTensor(values);
  auto&& x_result = at::index_put(r_self, l_indices, r_values, accumulate);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::index_put_(at::Tensor & self, at::TensorList indices, const at::Tensor & values, bool accumulate) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto l_indices = bridge::XlaCreateTensorList(indices);
  auto r_values = bridge::XlaToAtenTensor(values);
  auto&& x_result = at::index_put_(w_self, l_indices, r_values, accumulate);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::instance_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto&& x_result = at::instance_norm(r_input, r_weight, r_bias, r_running_mean, r_running_var, use_input_stats, momentum, eps, cudnn_enabled);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(running_var));
}
at::Tensor XLATypeBase::inverse(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::inverse(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::inverse_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::inverse_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_inverse_helper(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_inverse_helper(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::isclose(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::isclose(r_self, r_other, rtol, atol, equal_nan);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::isnan(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::isnan(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
bool XLATypeBase::is_distributed(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::is_distributed(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
bool XLATypeBase::is_floating_point(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::is_floating_point(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
bool XLATypeBase::is_complex(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::is_complex(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
bool XLATypeBase::is_nonzero(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::is_nonzero(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
bool XLATypeBase::is_same_size(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::is_same_size(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
bool XLATypeBase::is_signed(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::is_signed(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
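// The is_* predicates above return plain scalars, so the unwrapped result
// is handed back as-is; only at::Tensor results need the CreateXlaTensor
// rewrap. E.g. (t illustrative):
//   bool fp = type.is_floating_point(t); // no wrapping involved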
at::Tensor XLATypeBase::kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::kl_div(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::kl_div_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::kl_div_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::kthvalue(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::kthvalue(r_self, k, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) const {
  auto w_values = bridge::XlaToAtenMutableTensor(values);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::kthvalue_out(w_values, w_indices, r_self, k, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(values, indices);
}
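// Multi-output _out variants (kthvalue_out above; median_out, mode_out,
// max_out and friends below) write through the w_* mutable views and
// return a tuple of references to the caller's original XLA tensors,
// mirroring the at:: signatures. Sketch (all names illustrative):
//   at::Tensor vals, idxs; // pre-allocated XLA tensors
//   auto refs = type.kthvalue_out(vals, idxs, t, /*k=*/1, /*dim=*/0, false);
//   // std::get<0>(refs) aliases vals, std::get<1>(refs) aliases idxs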
at::Tensor XLATypeBase::layer_norm(const at::Tensor & input, at::IntList normalized_shape, const at::Tensor & weight, const at::Tensor & bias, double eps, bool cudnn_enable) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::layer_norm(r_input, normalized_shape, r_weight, r_bias, eps, cudnn_enable);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::linear(r_input, r_weight, r_bias);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
at::Tensor XLATypeBase::fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, at::Scalar weight_scale, at::Scalar weight_zero_point, const at::Tensor & bias) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_packed = bridge::XlaToAtenTensor(packed);
  auto r_col_offsets = bridge::XlaToAtenTensor(col_offsets);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::fbgemm_linear_int8_weight(r_input, r_weight, r_packed, r_col_offsets, weight_scale, weight_zero_point, r_bias);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(bias));
}
std::tuple<at::Tensor,at::Tensor,double,int64_t> XLATypeBase::fbgemm_linear_quantize_weight(const at::Tensor & input) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::fbgemm_linear_quantize_weight(r_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,double,int64_t>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(input)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(input)), std::get<2>(x_result), std::get<3>(x_result));
}
at::Tensor XLATypeBase::fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto&& x_result = at::fbgemm_pack_quantized_matrix(r_input, K, N);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(input));
}
at::Tensor & XLATypeBase::linspace_out(at::Tensor & result, at::Scalar start, at::Scalar end, int64_t steps) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::linspace_out(w_result, start, end, steps);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::log(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::log_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::log_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::log_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::log10(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log10(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::log10_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::log10_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::log10_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log10_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::log1p(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log1p(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::log1p_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::log1p_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::log1p_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log1p_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::log2(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log2(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::log2_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::log2_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::log2_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log2_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::logdet(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::logdet(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::logspace_out(at::Tensor & result, at::Scalar start, at::Scalar end, int64_t steps) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::logspace_out(w_result, start, end, steps);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::log_softmax(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_softmax(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::log_softmax(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_softmax(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_log_softmax(r_self, dim, half_to_float);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_log_softmax_backward_data(r_grad_output, r_output, dim, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::logsumexp(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::logsumexp(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::logsumexp_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::logsumexp_out(w_result, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) const {
  auto r_input1 = bridge::XlaToAtenTensor(input1);
  auto r_input2 = bridge::XlaToAtenTensor(input2);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::margin_ranking_loss(r_input1, r_input2, r_target, margin, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(target));
}
at::Tensor XLATypeBase::matmul(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::matmul(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::matmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::matmul_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::matrix_rank(const at::Tensor & self, double tol, bool symmetric) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::matrix_rank(r_self, tol, symmetric);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::matrix_rank(const at::Tensor & self, bool symmetric) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::matrix_rank(r_self, symmetric);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::matrix_power(const at::Tensor & self, int64_t n) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::matrix_power(r_self, n);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::max(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_max = bridge::XlaToAtenMutableTensor(max);
  auto w_max_values = bridge::XlaToAtenMutableTensor(max_values);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_out(w_max, w_max_values, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
}
at::Tensor XLATypeBase::max_values(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_values(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::max_pool1d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool1d_with_indices(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::max_pool1d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool1d(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::max_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool2d(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::max_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool3d(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mean(const at::Tensor & self, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean(r_self, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mean(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mean(const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean(r_self, dim, keepdim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mean(const at::Tensor & self, at::IntList dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mean(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
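// The five mean overloads above cover every combination the native op
// exposes: full reduction with or without an explicit dtype, and
// per-dimension reduction with any of {keepdim, dtype, both}. Illustrative
// calls (t is a placeholder XLA tensor):
//   auto m  = type.mean(t);                              // full reduction
//   auto md = type.mean(t, {0}, /*keepdim=*/true);       // along dim 0
//   auto mf = type.mean(t, {0}, at::ScalarType::Float);  // with a cast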
at::Tensor & XLATypeBase::mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean_out(w_result, r_self, dim, keepdim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean_out(w_result, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::mean_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mean_out(w_result, r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::median(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::median(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_values = bridge::XlaToAtenMutableTensor(values);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::median_out(w_values, w_indices, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(values, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::min(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::min(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_min = bridge::XlaToAtenMutableTensor(min);
  auto w_min_indices = bridge::XlaToAtenMutableTensor(min_indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::min_out(w_min, w_min_indices, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
}
at::Tensor XLATypeBase::min_values(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::min_values(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::mkldnn_convolution(r_self, r_weight, r_bias, padding, stride, dilation, groups);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mkldnn_convolution_backward_input(at::IntList self_size, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool bias_defined) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::mkldnn_convolution_backward_input(self_size, r_grad_output, r_weight, padding, stride, dilation, groups, bias_defined);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(weight));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::mkldnn_convolution_backward_weights(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool bias_defined) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mkldnn_convolution_backward_weights(weight_size, r_grad_output, r_self, padding, stride, dilation, groups, bias_defined);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::mkldnn_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, std::array<bool,3> output_mask) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::mkldnn_convolution_backward(r_self, r_grad_output, r_weight, padding, stride, dilation, groups, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double exponential_average_factor, double epsilon) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto&& x_result = at::miopen_batch_norm(r_input, r_weight, r_bias, r_running_mean, r_running_var, training, exponential_average_factor, epsilon);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(running_var)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(running_var)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(running_var)));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::miopen_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const at::Tensor & running_mean, const at::Tensor & running_var, const at::Tensor & save_mean, const at::Tensor & save_var, double epsilon) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto r_save_mean = bridge::XlaToAtenTensor(save_mean);
  auto r_save_var = bridge::XlaToAtenTensor(save_var);
  auto&& x_result = at::miopen_batch_norm_backward(r_input, r_grad_output, r_weight, r_running_mean, r_running_var, r_save_mean, r_save_var, epsilon);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(save_var)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(save_var)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(save_var)));
}
at::Tensor XLATypeBase::miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::miopen_convolution(r_self, r_weight, r_bias, padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::miopen_convolution_backward_input(at::IntList self_size, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::miopen_convolution_backward_input(self_size, r_grad_output, r_weight, padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(weight));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::miopen_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::miopen_convolution_backward(r_self, r_grad_output, r_weight, padding, stride, dilation, groups, benchmark, deterministic, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::miopen_convolution_backward_bias(const at::Tensor & grad_output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::miopen_convolution_backward_bias(r_grad_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor XLATypeBase::miopen_convolution_backward_weight(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::miopen_convolution_backward_weight(weight_size, r_grad_output, r_self, padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, at::IntList padding, at::IntList output_padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::miopen_convolution_transpose(r_self, r_weight, r_bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::miopen_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList output_padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::miopen_convolution_transpose_backward(r_self, r_grad_output, r_weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::miopen_convolution_transpose_backward_input(const at::Tensor & grad_output, const at::Tensor & weight, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::miopen_convolution_transpose_backward_input(r_grad_output, r_weight, padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(weight));
}
at::Tensor XLATypeBase::miopen_convolution_transpose_backward_weight(at::IntList weight_size, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding, at::IntList stride, at::IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::miopen_convolution_transpose_backward_weight(weight_size, r_grad_output, r_self, padding, stride, dilation, groups, benchmark, deterministic);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::mm(const at::Tensor & self, const at::Tensor & mat2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::mm(r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::mm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::mm_out(w_result, r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::_sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) const {
  auto r_sparse = bridge::XlaToAtenTensor(sparse);
  auto r_dense = bridge::XlaToAtenTensor(dense);
  auto&& x_result = at::_sparse_mm(r_sparse, r_dense);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(dense));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::mode(const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mode(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim) const {
  auto w_values = bridge::XlaToAtenMutableTensor(values);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mode_out(w_values, w_indices, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(values, indices);
}
at::Tensor XLATypeBase::mul(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::mul(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::mul_(at::Tensor & self, const at::Tensor & other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::mul_(w_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::mul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::mul_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::mul(const at::Tensor & self, at::Scalar other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::mul(r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::mul_(at::Tensor & self, at::Scalar other) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::mul_(w_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
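// In-place arithmetic such as mul_ is routed to at::native::mul_ on the
// unwrapped mutable view, so the mutation lands in the storage backing
// `self`, which is then returned unchanged as the XLA handle, e.g.
// (t illustrative):
//   type.mul_(t, 2); // scales t in place and returns t itself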
at::Tensor XLATypeBase::mv(const at::Tensor & self, const at::Tensor & vec) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::mv(r_self, r_vec); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::mv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & vec) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_vec = bridge::XlaToAtenTensor(vec); | |
auto&& x_result = at::mv_out(w_result, r_self, r_vec); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::mvlgamma(const at::Tensor & self, int64_t p) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::mvlgamma(r_self, p); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::mvlgamma_(at::Tensor & self, int64_t p) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::mvlgamma_(w_self, p); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::detail::infer_type(r_self).narrow_copy(r_self, dim, start, length); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
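// NB: A few ops, narrow_copy above among them, seem to have no free at::
// function in this ATen revision, so the wrapper resolves the concrete Type
// via at::detail::infer_type(r_self) and invokes the method on it.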
at::Tensor XLATypeBase::narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::narrow(r_self, dim, start, length);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::native_batch_norm(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto&& x_result = at::native_batch_norm(r_input, r_weight, r_bias, r_running_mean, r_running_var, training, momentum, eps);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(running_var)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(running_var)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(running_var)));
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & running_mean, const at::Tensor & running_var, const at::Tensor & save_mean, const at::Tensor & save_invstd, bool train, double eps, std::array<bool,3> output_mask) const {
  auto r_grad_out = bridge::XlaToAtenTensor(grad_out);
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto r_save_mean = bridge::XlaToAtenTensor(save_mean);
  auto r_save_invstd = bridge::XlaToAtenTensor(save_invstd);
  auto&& x_result = at::native_batch_norm_backward(r_grad_out, r_input, r_weight, r_running_mean, r_running_var, r_save_mean, r_save_invstd, train, eps, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(save_invstd)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(save_invstd)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(save_invstd)));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::batch_norm_update_stats(const at::Tensor & input, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_running_mean = bridge::XlaToAtenTensor(running_mean);
  auto r_running_var = bridge::XlaToAtenTensor(running_var);
  auto&& x_result = at::batch_norm_update_stats(r_input, r_running_mean, r_running_var, momentum);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(running_var)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(running_var)));
}
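// NB: Multi-output ops rewrap each element of the returned tuple
// individually. The generator names the target XLA device after one of the
// tensor arguments (running_var in the two batch-norm wrappers above);
// presumably any operand would do, since the inputs are expected to live on
// the same device.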
at::Tensor & XLATypeBase::ones_out(at::Tensor & result, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::ones_out(w_result, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
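// NB: Factory-style "_out" ops (ones_out here, and the rand/randint/randn/
// randperm/range variants further down) take no tensor inputs at all, so the
// only conversion needed is the writable unwrap of the destination.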
at::Tensor XLATypeBase::ones_like(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::ones_like(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) const {
  auto r_x1 = bridge::XlaToAtenTensor(x1);
  auto r_x2 = bridge::XlaToAtenTensor(x2);
  auto&& x_result = at::pairwise_distance(r_x1, r_x2, p, eps, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(x2));
}
at::Tensor XLATypeBase::pdist(const at::Tensor & self, double p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pdist(r_self, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_pdist_forward(const at::Tensor & self, double p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_pdist_forward(r_self, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_pdist = bridge::XlaToAtenTensor(pdist);
  auto&& x_result = at::_pdist_backward(r_grad, r_self, p, r_pdist);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) const {
  auto r_x1 = bridge::XlaToAtenTensor(x1);
  auto r_x2 = bridge::XlaToAtenTensor(x2);
  auto&& x_result = at::cosine_similarity(r_x1, r_x2, dim, eps);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(x2));
}
at::Tensor XLATypeBase::permute(const at::Tensor & self, at::IntList dims) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::permute(r_self, dims);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pixel_shuffle(r_self, upscale_factor);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::pin_memory(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pin_memory(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::pinverse(const at::Tensor & self, double rcond) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pinverse(r_self, rcond);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::rand_out(at::Tensor & result, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::rand_out(w_result, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::rand_out(at::Tensor & result, at::IntList size, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::rand_out(w_result, size, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::rand_like(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rand_like(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::randint_out(at::Tensor & result, int64_t high, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randint_out(w_result, high, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::randint_out(at::Tensor & result, int64_t high, at::IntList size, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randint_out(w_result, high, size, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::randint_out(at::Tensor & result, int64_t low, int64_t high, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randint_out(w_result, low, high, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::randint_out(at::Tensor & result, int64_t low, int64_t high, at::IntList size, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randint_out(w_result, low, high, size, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::randint_like(const at::Tensor & self, int64_t high) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::randint_like(r_self, high);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::randint_like(const at::Tensor & self, int64_t low, int64_t high) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::randint_like(r_self, low, high);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::randn_out(at::Tensor & result, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randn_out(w_result, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::randn_out(at::Tensor & result, at::IntList size, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randn_out(w_result, size, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::randn_like(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::randn_like(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::randperm_out(at::Tensor & result, int64_t n) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randperm_out(w_result, n);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::randperm_out(at::Tensor & result, int64_t n, at::Generator * generator) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::randperm_out(w_result, n, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::range_out(at::Tensor & result, at::Scalar start, at::Scalar end, at::Scalar step) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::range_out(w_result, start, end, step);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::repeat(const at::Tensor & self, at::IntList repeats) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::repeat(r_self, repeats);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::reshape(const at::Tensor & self, at::IntList shape) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reshape(r_self, shape);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::reshape_as(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::reshape_as(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::RoiPooling2d_forward(const at::Tensor & input, const at::Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_rois = bridge::XlaToAtenTensor(rois);
  auto&& x_result = at::RoiPooling2d_forward(r_input, r_rois, pooledHeight, pooledWidth, spatialScale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(rois)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(rois)));
}
at::Tensor XLATypeBase::RoiPooling2d_backward(const at::Tensor & input, const at::Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const at::Tensor & gradOutput, const at::Tensor & argmaxes) const {
  auto r_input = bridge::XlaToAtenTensor(input);
  auto r_rois = bridge::XlaToAtenTensor(rois);
  auto r_gradOutput = bridge::XlaToAtenTensor(gradOutput);
  auto r_argmaxes = bridge::XlaToAtenTensor(argmaxes);
  auto&& x_result = at::RoiPooling2d_backward(r_input, r_rois, pooledHeight, pooledWidth, spatialScale, r_gradOutput, r_argmaxes);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(argmaxes));
}
at::Tensor XLATypeBase::round(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::round(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::round_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::round_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::round_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::round_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::rrelu(const at::Tensor & self, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rrelu(r_self, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::rrelu_(at::Tensor & self, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::rrelu_(w_self, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::relu(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::relu(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::relu_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::relu_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::prelu(const at::Tensor & self, const at::Tensor & weight) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::prelu(r_self, r_weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::prelu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::prelu_backward(r_grad_output, r_self, r_weight);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::hardshrink(const at::Tensor & self, at::Scalar lambd) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardshrink(r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, at::Scalar lambd) const {
  auto r_grad_out = bridge::XlaToAtenTensor(grad_out);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardshrink_backward(r_grad_out, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::rsqrt(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rsqrt(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::rsqrt_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::rsqrt_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::rsqrt_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rsqrt_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::select(const at::Tensor & self, int64_t dim, int64_t index) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::select(r_self, dim, index);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::selu(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::selu(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::selu_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::selu_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::celu(const at::Tensor & self, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::celu(r_self, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::celu_(at::Tensor & self, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::celu_(w_self, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::sigmoid(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sigmoid(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sigmoid_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::sigmoid_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::sigmoid_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sigmoid_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::sin(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sin(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sin_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::sin_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::sin_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sin_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::sinh(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sinh(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sinh_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::sinh_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::sinh_out(at::Tensor & result, const at::Tensor & self) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sinh_out(w_result, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::detach(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detach(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::detach_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::detach_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
int64_t XLATypeBase::size(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::size(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
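// NB: Ops returning plain scalars, such as size above and stride below, need
// no rewrapping; the value computed on the unwrapped tensor is returned
// as-is.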
at::Tensor XLATypeBase::slice(const at::Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::slice(r_self, dim, start, end, step);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::slogdet(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::slogdet(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::smm(const at::Tensor & self, const at::Tensor & mat2) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::smm(r_self, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::softmax(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softmax(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::softmax(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softmax(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_softmax(r_self, dim, half_to_float);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_softmax_backward_data(r_grad_output, r_output, dim, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_sparse_add_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_sparse_add_out(w_result, r_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_dense_add_out(at::Tensor & result, const at::Tensor & self, at::SparseTensorRef other, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_dense_add_out(w_result, r_self, other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_div_zerodim_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_sparse_div_zerodim_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_div_scalar_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_div_scalar_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_mul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_sparse_mul_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_mul_zerodim_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_sparse_mul_zerodim_out(w_result, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::_sparse_mul_scalar_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_mul_scalar_out(w_result, r_self, other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
std::vector<at::Tensor> XLATypeBase::split(const at::Tensor & self, int64_t split_size, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::split(r_self, split_size, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(self));
}
std::vector<at::Tensor> XLATypeBase::split_with_sizes(const at::Tensor & self, at::IntList split_sizes, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::split_with_sizes(r_self, split_sizes, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(self));
}
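// NB: Ops returning std::vector<at::Tensor> (split and split_with_sizes
// above) rewrap the whole vector in one call through
// bridge::CreateXlaTensors, placing every piece on the source tensor's
// device.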
at::Tensor XLATypeBase::squeeze(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::squeeze(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::squeeze(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::squeeze(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::squeeze_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::squeeze_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::squeeze_(at::Tensor & self, int64_t dim) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::squeeze_(w_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::sspaddmm(r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sspaddmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::sspaddmm_out(w_result, r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::stack(at::TensorList tensors, int64_t dim) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::stack(l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(tensors));
}
at::Tensor & XLATypeBase::stack_out(at::Tensor & result, at::TensorList tensors, int64_t dim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::stack_out(w_result, l_tensors, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
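// NB: TensorList arguments are converted in bulk with
// bridge::XlaCreateTensorList (judging by its use in stack and stack_out
// above), and the TensorList overload of bridge::XlaTensorDevice names the
// inputs' common device.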
at::Tensor XLATypeBase::stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const at::Tensor & window, bool normalized, bool onesided) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_window = bridge::XlaToAtenTensor(window); | |
auto&& x_result = at::stft(r_self, n_fft, hop_length, win_length, r_window, normalized, onesided); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
int64_t XLATypeBase::stride(const at::Tensor & self, int64_t dim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::stride(r_self, dim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return x_result; | |
} | |
at::Tensor XLATypeBase::sum(const at::Tensor & self, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum(r_self, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::sum(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::sum(const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum(r_self, dim, keepdim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::sum(const at::Tensor & self, at::IntList dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::sum(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum(r_self, dim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim, at::ScalarType dtype) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum_out(w_result, r_self, dim, keepdim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum_out(w_result, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::sum_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sum_out(w_result, r_self, dim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::sum_to_size(const at::Tensor & self, at::IntList size) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::native::sum_to_size(r_self, size); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::sqrt(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sqrt(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::sqrt_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::sqrt_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::sqrt_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sqrt_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::std(const at::Tensor & self, bool unbiased) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::std(r_self, unbiased); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::std(const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::std(r_self, dim, unbiased, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::std_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::std_out(w_result, r_self, dim, unbiased, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::prod(const at::Tensor & self, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod(r_self, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::prod(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::prod(const at::Tensor & self, int64_t dim, bool keepdim, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod(r_self, dim, keepdim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::prod(const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod(r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::prod(const at::Tensor & self, int64_t dim, at::ScalarType dtype) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod(r_self, dim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim, at::ScalarType dtype) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod_out(w_result, r_self, dim, keepdim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, bool keepdim) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod_out(w_result, r_self, dim, keepdim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor & XLATypeBase::prod_out(at::Tensor & result, const at::Tensor & self, int64_t dim, at::ScalarType dtype) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::prod_out(w_result, r_self, dim, dtype); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::t(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::t(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::t_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::t_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::tan(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tan(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::tan_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::tan_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::tan_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tan_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::tanh(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tanh(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::tanh_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::tanh_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::tanh_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tanh_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::tensordot(const at::Tensor & self, const at::Tensor & other, at::IntList dims_self, at::IntList dims_other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::tensordot(r_self, r_other, dims_self, dims_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::threshold(const at::Tensor & self, at::Scalar threshold, at::Scalar value) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::threshold(r_self, threshold, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::threshold_(at::Tensor & self, at::Scalar threshold, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::threshold_(w_self, threshold, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::threshold_out(at::Tensor & result, const at::Tensor & self, at::Scalar threshold, at::Scalar value) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::threshold_out(w_result, r_self, threshold, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar threshold) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::threshold_backward(r_grad_output, r_self, threshold); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::transpose(r_self, dim0, dim1); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::transpose_(w_self, dim0, dim1); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::one_hot(const at::Tensor & self, int64_t num_classes) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::one_hot(r_self, num_classes); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::flip(const at::Tensor & self, at::IntList dims) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::flip(r_self, dims); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::roll(const at::Tensor & self, at::IntList shifts, at::IntList dims) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::roll(r_self, shifts, dims); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::rot90(const at::Tensor & self, int64_t k, at::IntList dims) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::rot90(r_self, k, dims); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntList expand1, at::IntList expand2, at::IntList expand3, at::IntList sumdim, int64_t unroll_dim) const { | |
auto r_i1 = bridge::XlaToAtenTensor(i1); | |
auto r_i2 = bridge::XlaToAtenTensor(i2); | |
auto r_i3 = bridge::XlaToAtenTensor(i3); | |
auto&& x_result = at::_trilinear(r_i1, r_i2, r_i3, expand1, expand2, expand3, sumdim, unroll_dim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(i3)); | |
} | |
at::Tensor XLATypeBase::triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) const { | |
auto r_anchor = bridge::XlaToAtenTensor(anchor); | |
auto r_positive = bridge::XlaToAtenTensor(positive); | |
auto r_negative = bridge::XlaToAtenTensor(negative); | |
auto&& x_result = at::triplet_margin_loss(r_anchor, r_positive, r_negative, margin, p, eps, swap, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(negative)); | |
} | |
at::Tensor XLATypeBase::trunc(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::trunc(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::trunc_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::trunc_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::trunc_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::trunc_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::type_as(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::type_as(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_unique(const at::Tensor & self, bool sorted, bool return_inverse) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_unique(r_self, sorted, return_inverse); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_unique_dim(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_unique_dim(r_self, dim, sorted, return_inverse);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor XLATypeBase::_unsafe_view(const at::Tensor & self, at::IntList size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_unsafe_view(r_self, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::unsqueeze(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::unsqueeze(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::unsqueeze_(at::Tensor & self, int64_t dim) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::unsqueeze_(w_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::var(const at::Tensor & self, bool unbiased) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::var(r_self, unbiased);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::var(const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::var(r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::var_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool unbiased, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::var_out(w_result, r_self, dim, unbiased, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::view_as(const at::Tensor & self, const at::Tensor & other) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::view_as(r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) const {
  auto r_condition = bridge::XlaToAtenTensor(condition);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::where(r_condition, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_s_where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) const {
  auto r_condition = bridge::XlaToAtenTensor(condition);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::_s_where(r_condition, r_self, r_other);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::norm_except_dim(const at::Tensor & v, int64_t pow, int64_t dim) const {
  auto r_v = bridge::XlaToAtenTensor(v);
  auto&& x_result = at::norm_except_dim(r_v, pow, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(v));
}
at::Tensor XLATypeBase::_weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim) const {
  auto r_v = bridge::XlaToAtenTensor(v);
  auto r_g = bridge::XlaToAtenTensor(g);
  auto&& x_result = at::_weight_norm(r_v, r_g, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(g));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_weight_norm_cuda_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim) const {
  auto r_v = bridge::XlaToAtenTensor(v);
  auto r_g = bridge::XlaToAtenTensor(g);
  auto&& x_result = at::_weight_norm_cuda_interface(r_v, r_g, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(g)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(g)));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_weight_norm_cuda_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) const {
  auto r_grad_w = bridge::XlaToAtenTensor(grad_w);
  auto r_saved_v = bridge::XlaToAtenTensor(saved_v);
  auto r_saved_g = bridge::XlaToAtenTensor(saved_g);
  auto r_saved_norms = bridge::XlaToAtenTensor(saved_norms);
  auto&& x_result = at::_weight_norm_cuda_interface_backward(r_grad_w, r_saved_v, r_saved_g, r_saved_norms, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(saved_norms)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(saved_norms)));
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) const {
  auto r_grad_w = bridge::XlaToAtenTensor(grad_w);
  auto r_saved_v = bridge::XlaToAtenTensor(saved_v);
  auto r_saved_g = bridge::XlaToAtenTensor(saved_g);
  auto r_saved_norms = bridge::XlaToAtenTensor(saved_norms);
  auto&& x_result = at::_weight_norm_differentiable_backward(r_grad_w, r_saved_v, r_saved_g, r_saved_norms, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(saved_norms)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(saved_norms)));
}
at::Tensor & XLATypeBase::zeros_out(at::Tensor & result, at::IntList size) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto&& x_result = at::zeros_out(w_result, size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::zeros_like(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::zeros_like(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::_standard_gamma_grad(r_self, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_standard_gamma(const at::Tensor & self, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_standard_gamma(r_self, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::poisson(const at::Tensor & self, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::poisson(r_self, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::native_norm(const at::Tensor & self, at::Scalar p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native_norm(r_self, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_sparse_sum(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_sum(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_sparse_sum(const at::Tensor & self, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_sum(r_self, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_sparse_sum(const at::Tensor & self, at::IntList dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_sum(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_sparse_sum(const at::Tensor & self, at::IntList dim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_sum(r_self, dim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntList dim) const {
  auto r_grad = bridge::XlaToAtenTensor(grad);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_sparse_sum_backward(r_grad, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm(r_self, p, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::norm(const at::Tensor & self, at::Scalar p) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm(r_self, p);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim, at::ScalarType dtype) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm(r_self, p, dim, keepdim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::norm(const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm(r_self, p, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::norm_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim, at::ScalarType dtype) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm_out(w_result, r_self, p, dim, keepdim, dtype);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor & XLATypeBase::norm_out(at::Tensor & result, const at::Tensor & self, c10::optional<at::Scalar> p, at::IntList dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::norm_out(w_result, r_self, p, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::frobenius_norm(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::frobenius_norm(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::frobenius_norm(const at::Tensor & self, at::IntList dim, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::frobenius_norm(r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::frobenius_norm_out(at::Tensor & result, const at::Tensor & self, at::IntList dim, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::frobenius_norm_out(w_result, r_self, dim, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::nuclear_norm(const at::Tensor & self, bool keepdim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::nuclear_norm(r_self, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::nuclear_norm_out(at::Tensor & result, const at::Tensor & self, bool keepdim) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::nuclear_norm_out(w_result, r_self, keepdim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::native_clone(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native_clone(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::clone(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::clone(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::native_resize_as_(at::Tensor & self, const at::Tensor & the_template) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_the_template = bridge::XlaToAtenTensor(the_template);
  auto&& x_result = at::native_resize_as_(w_self, r_the_template);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::resize_as_(at::Tensor & self, const at::Tensor & the_template) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_the_template = bridge::XlaToAtenTensor(the_template);
  auto&& x_result = at::resize_as_(w_self, r_the_template);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::native_pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native_pow_out(w_result, r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::native_pow(const at::Tensor & self, at::Scalar exponent) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native_pow(r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::pow_out(at::Tensor & result, const at::Tensor & self, at::Scalar exponent) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pow_out(w_result, r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::pow(const at::Tensor & self, at::Scalar exponent) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::pow(r_self, exponent);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::native_zero_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native_zero_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::zero_(at::Tensor & self) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::zero_(w_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::sub_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::sub_out(w_result, r_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::sub(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::sub(r_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sub_(at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::sub_(w_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::sub(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::sub(r_self, other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::sub_(at::Tensor & self, at::Scalar other, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::sub_(w_self, other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::rsub(const at::Tensor & self, const at::Tensor & other, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::rsub(r_self, r_other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::rsub(const at::Tensor & self, at::Scalar other, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::rsub(r_self, other, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::s_native_addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::s_native_addmm_out(w_result, r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::s_native_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::s_native_addmm(r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::s_native_addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::s_native_addmm_(w_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_sparse_addmm(const at::Tensor & self, const at::Tensor & sparse, const at::Tensor & dense, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_sparse = bridge::XlaToAtenTensor(sparse);
  auto r_dense = bridge::XlaToAtenTensor(dense);
  auto&& x_result = at::_sparse_addmm(r_self, r_sparse, r_dense, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::addmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::addmm_out(w_result, r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::addmm(r_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, at::Scalar beta, at::Scalar alpha) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::native::addmm_(w_self, r_mat1, r_mat2, beta, alpha);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::_sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntList size, const at::TensorOptions & options) const {
  auto&& x_result = at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, options);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(options));
}
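// Factory-style ops taking at::TensorOptions have no input tensor to read a
// device from, so the target device is derived from the options themselves
// via bridge::XlaTensorDevice(options).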
at::Tensor XLATypeBase::_sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntList size, const at::Tensor & indices, const at::Tensor & values, const at::TensorOptions & options) const {
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto r_values = bridge::XlaToAtenTensor(values);
  auto&& x_result = at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, r_indices, r_values, options);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(values));
}
at::Tensor & XLATypeBase::sparse_resize_(at::Tensor & self, at::IntList size, int64_t sparse_dim, int64_t dense_dim) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::sparse_resize_(w_self, size, sparse_dim, dense_dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor & XLATypeBase::sparse_resize_and_clear_(at::Tensor & self, at::IntList size, int64_t sparse_dim, int64_t dense_dim) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::native::sparse_resize_and_clear_(w_self, size, sparse_dim, dense_dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::sparse_mask(const at::Tensor & self, at::SparseTensorRef mask) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).sparse_mask(r_self, mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
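// Sparse methods such as sparse_mask() above go through
// at::detail::infer_type(r_self), which re-dispatches on the unwrapped
// tensor's actual (non-XLA) type; note that the at::SparseTensorRef mask is
// passed through without unwrapping.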
at::Tensor XLATypeBase::to_dense(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).to_dense(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
int64_t XLATypeBase::sparse_dim(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).sparse_dim(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
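// Accessors that return plain scalars (int64_t, bool) have nothing to
// rewrap: the value computed on the unwrapped tensor is returned directly.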
int64_t XLATypeBase::_dimI(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self)._dimI(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
int64_t XLATypeBase::dense_dim(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).dense_dim(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
int64_t XLATypeBase::_dimV(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self)._dimV(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
int64_t XLATypeBase::_nnz(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self)._nnz(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
at::Tensor XLATypeBase::coalesce(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).coalesce(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
bool XLATypeBase::is_coalesced(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).is_coalesced(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
at::Tensor XLATypeBase::_indices(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self)._indices(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::_values(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self)._values(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::_coalesced_(at::Tensor & self, bool coalesced) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::detail::infer_type(w_self)._coalesced_(w_self, coalesced);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
at::Tensor XLATypeBase::indices(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).indices(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::values(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).values(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::hspmm_out(at::Tensor & result, const at::Tensor & mat1, const at::Tensor & mat2) const {
  auto w_result = bridge::XlaToAtenMutableTensor(result);
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::hspmm_out(w_result, r_mat1, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return result;
}
at::Tensor XLATypeBase::hspmm(const at::Tensor & mat1, const at::Tensor & mat2) const {
  auto r_mat1 = bridge::XlaToAtenTensor(mat1);
  auto r_mat2 = bridge::XlaToAtenTensor(mat2);
  auto&& x_result = at::hspmm(r_mat1, r_mat2);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(mat2));
}
at::Tensor & XLATypeBase::copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_src = bridge::XlaToAtenTensor(src);
  auto&& x_result = at::copy_sparse_to_sparse_(w_self, r_src, non_blocking);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
int64_t XLATypeBase::numel(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::numel(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
std::vector<at::Tensor> XLATypeBase::unbind(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::unbind(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(self));
}
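// Ops returning std::vector<at::Tensor>, like unbind() above, use the plural
// helper bridge::CreateXlaTensors to rewrap every element on the source
// device in one call.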
at::Tensor XLATypeBase::to_sparse(const at::Tensor & self, int64_t sparse_dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).to_sparse(r_self, sparse_dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::to_sparse(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::detail::infer_type(r_self).to_sparse(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::to(const at::Tensor & self, const at::TensorOptions & options, bool non_blocking, bool copy) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::to(r_self, options, non_blocking, copy);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::to(const at::Tensor & self, c10::Device device, at::ScalarType dtype, bool non_blocking, bool copy) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::to(r_self, device, dtype, non_blocking, copy);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::to(r_self, dtype, non_blocking, copy);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor XLATypeBase::to(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_other = bridge::XlaToAtenTensor(other);
  auto&& x_result = at::native::to(r_self, r_other, non_blocking, copy);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::vector<at::Tensor> XLATypeBase::meshgrid(at::TensorList tensors) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::meshgrid(l_tensors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensors(x_result, bridge::XlaTensorDevice(tensors));
}
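// at::TensorList arguments are converted in bulk with
// bridge::XlaCreateTensorList, the list-valued counterpart of
// bridge::XlaToAtenTensor; the result device is then taken from the list
// itself.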
at::Tensor XLATypeBase::cartesian_prod(at::TensorList tensors) const {
  auto l_tensors = bridge::XlaCreateTensorList(tensors);
  auto&& x_result = at::cartesian_prod(l_tensors);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(tensors));
}
at::Tensor XLATypeBase::combinations(const at::Tensor & self, int64_t r, bool with_replacement) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::combinations(r_self, r, with_replacement);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Scalar XLATypeBase::item(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::native::item(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
at::Scalar XLATypeBase::_local_scalar_dense(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::_local_scalar_dense(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return x_result;
}
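// item() and _local_scalar_dense() return a host-side at::Scalar, so calling
// them on an XLA tensor presumably forces the value to be materialized on
// the host; there is nothing to rewrap, and the scalar is returned as-is.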
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_fused_lstm_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const at::Tensor & input_bias, const at::Tensor & hidden_bias) const {
  auto r_input_gates = bridge::XlaToAtenTensor(input_gates);
  auto r_hidden_gates = bridge::XlaToAtenTensor(hidden_gates);
  auto r_cx = bridge::XlaToAtenTensor(cx);
  auto r_input_bias = bridge::XlaToAtenTensor(input_bias);
  auto r_hidden_bias = bridge::XlaToAtenTensor(hidden_bias);
  auto&& x_result = at::_thnn_fused_lstm_cell(r_input_gates, r_hidden_gates, r_cx, r_input_bias, r_hidden_bias);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hidden_bias)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hidden_bias)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(hidden_bias)));
}
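// For multi-input RNN primitives the generator pins every output to the
// device of a single representative argument (hidden_bias here; workspace,
// hx, input or col_offsets_hh in the variants below) instead of recomputing
// it per output.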
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_fused_lstm_cell_backward(const at::Tensor & grad_hy, const at::Tensor & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) const { | |
auto r_grad_hy = bridge::XlaToAtenTensor(grad_hy); | |
auto r_grad_cy = bridge::XlaToAtenTensor(grad_cy); | |
auto r_cx = bridge::XlaToAtenTensor(cx); | |
auto r_cy = bridge::XlaToAtenTensor(cy); | |
auto r_workspace = bridge::XlaToAtenTensor(workspace); | |
auto&& x_result = at::_thnn_fused_lstm_cell_backward(r_grad_hy, r_grad_cy, r_cx, r_cy, r_workspace, has_bias); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<3>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<4>(x_result), bridge::XlaTensorDevice(workspace))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_thnn_fused_gru_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const at::Tensor & input_bias, const at::Tensor & hidden_bias) const { | |
auto r_input_gates = bridge::XlaToAtenTensor(input_gates); | |
auto r_hidden_gates = bridge::XlaToAtenTensor(hidden_gates); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_input_bias = bridge::XlaToAtenTensor(input_bias); | |
auto r_hidden_bias = bridge::XlaToAtenTensor(hidden_bias); | |
auto&& x_result = at::_thnn_fused_gru_cell(r_input_gates, r_hidden_gates, r_hx, r_input_bias, r_hidden_bias); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hidden_bias)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hidden_bias))); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> XLATypeBase::_thnn_fused_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) const { | |
auto r_grad_hy = bridge::XlaToAtenTensor(grad_hy); | |
auto r_workspace = bridge::XlaToAtenTensor(workspace); | |
auto&& x_result = at::_thnn_fused_gru_cell_backward(r_grad_hy, r_workspace, has_bias); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<3>(x_result), bridge::XlaTensorDevice(workspace)), bridge::CreateXlaTensor(std::get<4>(x_result), bridge::XlaTensorDevice(workspace))); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto l_hx = bridge::XlaCreateTensorList(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::lstm(r_input, l_hx, l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(input)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(input)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(input))); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const { | |
auto r_data = bridge::XlaToAtenTensor(data); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto l_hx = bridge::XlaCreateTensorList(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::lstm(r_data, r_batch_sizes, l_hx, l_params, has_biases, num_layers, dropout, train, bidirectional); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(batch_sizes)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(batch_sizes)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(batch_sizes))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::gru(r_input, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const { | |
auto r_data = bridge::XlaToAtenTensor(data); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::gru(r_data, r_batch_sizes, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::rnn_tanh(r_input, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const { | |
auto r_data = bridge::XlaToAtenTensor(data); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::rnn_tanh(r_data, r_batch_sizes, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::rnn_relu(r_input, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) const { | |
auto r_data = bridge::XlaToAtenTensor(data); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::rnn_relu(r_data, r_batch_sizes, r_hx, l_params, has_biases, num_layers, dropout, train, bidirectional); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(hx)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(hx))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto l_hx = bridge::XlaCreateTensorList(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto&& x_result = at::lstm_cell(r_input, l_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(b_hh)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(b_hh))); | |
} | |
at::Tensor XLATypeBase::gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto&& x_result = at::gru_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(b_hh)); | |
} | |
at::Tensor XLATypeBase::rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto&& x_result = at::rnn_tanh_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(b_hh)); | |
} | |
at::Tensor XLATypeBase::rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto&& x_result = at::rnn_relu_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(b_hh)); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::quantized_lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto l_hx = bridge::XlaCreateTensorList(hx); | |
auto l_params = bridge::XlaCreateTensorList(params); | |
auto&& x_result = at::quantized_lstm(r_input, l_hx, l_params, has_biases, num_layers, dropout, train, bidirectional, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(input)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(input)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(input))); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto l_hx = bridge::XlaCreateTensorList(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto r_packed_ih = bridge::XlaToAtenTensor(packed_ih); | |
auto r_packed_hh = bridge::XlaToAtenTensor(packed_hh); | |
auto r_col_offsets_ih = bridge::XlaToAtenTensor(col_offsets_ih); | |
auto r_col_offsets_hh = bridge::XlaToAtenTensor(col_offsets_hh); | |
auto&& x_result = at::quantized_lstm_cell(r_input, l_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh, r_packed_ih, r_packed_hh, r_col_offsets_ih, r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(col_offsets_hh)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(col_offsets_hh))); | |
} | |
at::Tensor XLATypeBase::quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto r_packed_ih = bridge::XlaToAtenTensor(packed_ih); | |
auto r_packed_hh = bridge::XlaToAtenTensor(packed_hh); | |
auto r_col_offsets_ih = bridge::XlaToAtenTensor(col_offsets_ih); | |
auto r_col_offsets_hh = bridge::XlaToAtenTensor(col_offsets_hh); | |
auto&& x_result = at::quantized_gru_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh, r_packed_ih, r_packed_hh, r_col_offsets_ih, r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(col_offsets_hh)); | |
} | |
at::Tensor XLATypeBase::quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto r_packed_ih = bridge::XlaToAtenTensor(packed_ih); | |
auto r_packed_hh = bridge::XlaToAtenTensor(packed_hh); | |
auto r_col_offsets_ih = bridge::XlaToAtenTensor(col_offsets_ih); | |
auto r_col_offsets_hh = bridge::XlaToAtenTensor(col_offsets_hh); | |
auto&& x_result = at::quantized_rnn_relu_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh, r_packed_ih, r_packed_hh, r_col_offsets_ih, r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(col_offsets_hh)); | |
} | |
at::Tensor XLATypeBase::quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, at::Scalar scale_ih, at::Scalar scale_hh, at::Scalar zero_point_ih, at::Scalar zero_point_hh) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_hx = bridge::XlaToAtenTensor(hx); | |
auto r_w_ih = bridge::XlaToAtenTensor(w_ih); | |
auto r_w_hh = bridge::XlaToAtenTensor(w_hh); | |
auto r_b_ih = bridge::XlaToAtenTensor(b_ih); | |
auto r_b_hh = bridge::XlaToAtenTensor(b_hh); | |
auto r_packed_ih = bridge::XlaToAtenTensor(packed_ih); | |
auto r_packed_hh = bridge::XlaToAtenTensor(packed_hh); | |
auto r_col_offsets_ih = bridge::XlaToAtenTensor(col_offsets_ih); | |
auto r_col_offsets_hh = bridge::XlaToAtenTensor(col_offsets_hh); | |
auto&& x_result = at::quantized_rnn_tanh_cell(r_input, r_hx, r_w_ih, r_w_hh, r_b_ih, r_b_hh, r_packed_ih, r_packed_hh, r_col_offsets_ih, r_col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(col_offsets_hh)); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) const { | |
auto r_input = bridge::XlaToAtenTensor(input); | |
auto r_lengths = bridge::XlaToAtenTensor(lengths); | |
auto&& x_result = at::_pack_padded_sequence(r_input, r_lengths, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(lengths)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(lengths))); | |
} | |
at::Tensor XLATypeBase::_pack_padded_sequence_backward(const at::Tensor & grad, at::IntList input_size, const at::Tensor & batch_sizes, bool batch_first) const { | |
auto r_grad = bridge::XlaToAtenTensor(grad); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto&& x_result = at::_pack_padded_sequence_backward(r_grad, input_size, r_batch_sizes, batch_first); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(batch_sizes)); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::_pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, at::Scalar padding_value, int64_t total_length) const { | |
auto r_data = bridge::XlaToAtenTensor(data); | |
auto r_batch_sizes = bridge::XlaToAtenTensor(batch_sizes); | |
auto&& x_result = at::_pad_packed_sequence(r_data, r_batch_sizes, batch_first, padding_value, total_length); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(batch_sizes)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(batch_sizes))); | |
} | |
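// data_ptr is a plain accessor: it returns the raw buffer pointer of the
// unwrapped ATen tensor, so there is no tensor result to re-wrap.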
void* XLATypeBase::data_ptr(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::native::data_ptr(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return x_result; | |
} | |
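// The set_ overloads rebind self's storage and metadata in place via
// at::native::set_ on the mutable unwrapped alias; is_set_to is the matching
// read-only query and forwards its boolean result unchanged.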
at::Tensor & XLATypeBase::set_(at::Tensor & self, at::Storage source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::set_(w_self, source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntList size, at::IntList stride) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::set_(w_self, source, storage_offset, size, stride); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::set_(at::Tensor & self, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::native::set_(w_self, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::set_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::set_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
bool XLATypeBase::is_set_to(const at::Tensor & self, const at::Tensor & tensor) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor = bridge::XlaToAtenTensor(tensor); | |
auto&& x_result = at::native::is_set_to(r_self, r_tensor); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return x_result; | |
} | |
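// In-place masked ops: masked_fill_ (Scalar and Tensor value overloads) and
// masked_scatter_ unwrap the mask/source operands read-only and mutate self
// through its unwrapped alias.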
at::Tensor & XLATypeBase::masked_fill_(at::Tensor & self, const at::Tensor & mask, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::native::masked_fill_(w_self, r_mask, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::masked_fill_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::native::masked_fill_(w_self, r_mask, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::masked_scatter_(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::native::masked_scatter_(w_self, r_mask, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
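// view is wrapped functionally: the reshaped ATen result is boxed into a new
// XLA tensor on self's device rather than returned as an alias of self, so
// XLA-side aliasing semantics may differ from the native view.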
at::Tensor XLATypeBase::view(const at::Tensor & self, at::IntList size) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::native::view(r_self, size); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
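// In-place indexing family (put_, index_add_, index_fill_, scatter_,
// scatter_add_): index and source operands are unwrapped read-only and self
// is mutated through its unwrapped alias.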
at::Tensor & XLATypeBase::put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::native::put_(w_self, r_index, r_source, accumulate); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_source = bridge::XlaToAtenTensor(source); | |
auto&& x_result = at::native::index_add_(w_self, dim, r_index, r_source); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::native::index_fill_(w_self, dim, r_index, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_value = bridge::XlaToAtenTensor(value); | |
auto&& x_result = at::native::index_fill_(w_self, dim, r_index, r_value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_src = bridge::XlaToAtenTensor(src); | |
auto&& x_result = at::native::scatter_(w_self, dim, r_index, r_src); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::native::scatter_(w_self, dim, r_index, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto r_src = bridge::XlaToAtenTensor(src); | |
auto&& x_result = at::native::scatter_add_(w_self, dim, r_index, r_src); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
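// In-place comparison operators (lt_, gt_, le_, ge_, eq_, ne_), each with a
// Scalar and a Tensor right-hand side, all routed through at::native.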
at::Tensor & XLATypeBase::lt_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::lt_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::lt_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::lt_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::gt_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::gt_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::gt_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::gt_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::le_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::le_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::le_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::le_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::ge_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::ge_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::ge_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::ge_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::eq_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::eq_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::eq_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::eq_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::ne_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::ne_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::ne_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::ne_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
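// Bitwise and shift operators. The out-of-place forms (__and__, __or__,
// __xor__, __lshift__, __rshift__) go through the at:: dispatcher and wrap a
// fresh XLA result; the in-place forms (__iand__ and friends) use at::native
// and return self.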
at::Tensor XLATypeBase::__and__(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::__and__(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::__and__(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::__and__(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::__iand__(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::__iand__(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::__iand__(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::__iand__(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::__or__(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::__or__(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::__or__(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::__or__(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::__ior__(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::__ior__(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::__ior__(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::__ior__(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::__xor__(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::__xor__(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::__xor__(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::__xor__(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::__ixor__(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::__ixor__(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::__ixor__(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::__ixor__(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::__lshift__(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::__lshift__(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::__lshift__(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::__lshift__(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::__ilshift__(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::__ilshift__(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::__ilshift__(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::__ilshift__(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor XLATypeBase::__rshift__(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::__rshift__(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::__rshift__(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::__rshift__(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::__irshift__(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::__irshift__(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::__irshift__(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::__irshift__(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
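// In-place pointwise math (lgamma_ through remainder_). Most of these call
// at::native directly; tril_ and triu_ are the exception, dispatching through
// at::detail::infer_type(w_self), i.e. through the type of the unwrapped
// tensor, presumably because no at::native fallback was available for them.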
at::Tensor & XLATypeBase::lgamma_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::lgamma_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::atan2_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::atan2_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::tril_(at::Tensor & self, int64_t diagonal) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::detail::infer_type(w_self).tril_(w_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::triu_(at::Tensor & self, int64_t diagonal) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::detail::infer_type(w_self).triu_(w_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::digamma_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::digamma_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::polygamma_(at::Tensor & self, int64_t n) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::polygamma_(w_self, n); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::erfinv_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::erfinv_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::frac_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::frac_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::renorm_(at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::renorm_(w_self, p, dim, maxnorm); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::reciprocal_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::reciprocal_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::neg_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::neg_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::pow_(at::Tensor & self, at::Scalar exponent) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::pow_(w_self, exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::pow_(at::Tensor & self, const at::Tensor & exponent) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_exponent = bridge::XlaToAtenTensor(exponent); | |
auto&& x_result = at::native::pow_(w_self, r_exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::lerp_(at::Tensor & self, const at::Tensor & end, at::Scalar weight) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_end = bridge::XlaToAtenTensor(end); | |
auto&& x_result = at::native::lerp_(w_self, r_end, weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::sign_(at::Tensor & self) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::sign_(w_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::fmod_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::fmod_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::fmod_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::fmod_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::remainder_(at::Tensor & self, at::Scalar other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::remainder_(w_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::remainder_(at::Tensor & self, const at::Tensor & other) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::native::remainder_(w_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
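// addbmm appears in all three wrapper shapes: in-place (mutate self and
// return the XLA reference), _out (write through the unwrapped result
// alias), and functional (wrap a fresh XLA tensor on self's device).
// addcmul_ and addcdiv_ are the in-place members of their families; the
// _out and functional forms appear further below.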
at::Tensor & XLATypeBase::addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_batch1 = bridge::XlaToAtenTensor(batch1); | |
auto r_batch2 = bridge::XlaToAtenTensor(batch2); | |
auto&& x_result = at::native::addbmm_(w_self, r_batch1, r_batch2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::addbmm_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_batch1 = bridge::XlaToAtenTensor(batch1); | |
auto r_batch2 = bridge::XlaToAtenTensor(batch2); | |
auto&& x_result = at::addbmm_out(w_result, r_self, r_batch1, r_batch2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, at::Scalar beta, at::Scalar alpha) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_batch1 = bridge::XlaToAtenTensor(batch1); | |
auto r_batch2 = bridge::XlaToAtenTensor(batch2); | |
auto&& x_result = at::addbmm(r_self, r_batch1, r_batch2, beta, alpha); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::native::addcmul_(w_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::native::addcdiv_(w_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
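// In-place random fillers (random_, uniform_, normal_, cauchy_, log_normal_,
// exponential_, geometric_); the at::Generator pointer is forwarded
// unchanged to the at::native implementations.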
at::Tensor & XLATypeBase::random_(at::Tensor & self, int64_t from, int64_t to, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::random_(w_self, from, to, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::random_(at::Tensor & self, int64_t to, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::random_(w_self, to, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::random_(at::Tensor & self, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::random_(w_self, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::uniform_(at::Tensor & self, double from, double to, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::uniform_(w_self, from, to, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::normal_(w_self, mean, std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::cauchy_(at::Tensor & self, double median, double sigma, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::cauchy_(w_self, median, sigma, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::log_normal_(at::Tensor & self, double mean, double std, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::log_normal_(w_self, mean, std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::exponential_(at::Tensor & self, double lambd, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::exponential_(w_self, lambd, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
at::Tensor & XLATypeBase::geometric_(at::Tensor & self, double p, at::Generator * generator) const { | |
auto w_self = bridge::XlaToAtenMutableTensor(self); | |
auto&& x_result = at::native::geometric_(w_self, p, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return self; | |
} | |
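// diag, cross, triu and tril come in _out and functional forms; trace is
// functional only. tril_indices / triu_indices are factories with no tensor
// inputs, so the target XLA device is derived from the TensorOptions
// argument rather than from an input tensor.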
at::Tensor & XLATypeBase::diag_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::diag_out(w_result, r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::diag(const at::Tensor & self, int64_t diagonal) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::diag(r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::cross_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other, int64_t dim) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::cross_out(w_result, r_self, r_other, dim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::cross(const at::Tensor & self, const at::Tensor & other, int64_t dim) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::cross(r_self, r_other, dim); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::triu_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::triu_out(w_result, r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::triu(const at::Tensor & self, int64_t diagonal) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::triu(r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::tril_out(at::Tensor & result, const at::Tensor & self, int64_t diagonal) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tril_out(w_result, r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::tril(const at::Tensor & self, int64_t diagonal) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::tril(r_self, diagonal); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::tril_indices(int64_t row, int64_t col, int64_t offset, const at::TensorOptions & options) const { | |
auto&& x_result = at::tril_indices(row, col, offset, options); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(options)); | |
} | |
at::Tensor XLATypeBase::triu_indices(int64_t row, int64_t col, int64_t offset, const at::TensorOptions & options) const { | |
auto&& x_result = at::triu_indices(row, col, offset, options); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(options)); | |
} | |
at::Tensor XLATypeBase::trace(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::trace(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
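// Out-of-place comparison operators (ne, eq, ge, le, gt, lt), each with
// Scalar and Tensor right-hand sides plus matching _out variants; the
// functional forms allocate a new XLA-wrapped result on self's device.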
at::Tensor & XLATypeBase::ne_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::ne_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::ne(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::ne(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::ne_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::ne_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::ne(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::ne(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::eq_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::eq_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::eq(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::eq(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::eq_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::eq_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::eq(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::eq(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::ge_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::ge_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::ge(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::ge(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::ge_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::ge_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::ge(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::ge(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::le_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::le_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::le(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::le(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::le_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::le_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::le(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::le(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::gt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::gt_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::gt(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::gt(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::gt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::gt_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::gt(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::gt(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::lt_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::lt_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::lt(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::lt(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::lt_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::lt_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::lt(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::lt(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
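// Selection ops (take, index_select, masked_select, nonzero, gather) in _out
// and functional forms; index and mask operands are unwrapped read-only.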
at::Tensor & XLATypeBase::take_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & index) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::take_out(w_result, r_self, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::take(const at::Tensor & self, const at::Tensor & index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::take(r_self, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::index_select_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::index_select_out(w_result, r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::index_select(r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::masked_select_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & mask) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::masked_select_out(w_result, r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::masked_select(const at::Tensor & self, const at::Tensor & mask) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_mask = bridge::XlaToAtenTensor(mask); | |
auto&& x_result = at::masked_select(r_self, r_mask); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::nonzero_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::nonzero_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::nonzero(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::nonzero(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::gather_out(at::Tensor & result, const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::gather_out(w_result, r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::gather(const at::Tensor & self, int64_t dim, const at::Tensor & index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_index = bridge::XlaToAtenTensor(index); | |
auto&& x_result = at::gather(r_self, dim, r_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
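// The _out and functional forms of addcmul and addcdiv, completing the
// in-place variants defined earlier in this file.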
at::Tensor & XLATypeBase::addcmul_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::addcmul_out(w_result, r_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::addcmul(r_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::addcdiv_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::addcdiv_out(w_result, r_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, at::Scalar value) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_tensor1 = bridge::XlaToAtenTensor(tensor1); | |
auto r_tensor2 = bridge::XlaToAtenTensor(tensor2); | |
auto&& x_result = at::addcdiv(r_self, r_tensor1, r_tensor2, value); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
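// LAPACK-style linear algebra (gels, trtrs, symeig, eig, svd, the cholesky
// family, potri, pstrf, qr, geqrf, orgqr). Multi-output _out variants return
// a tuple of the original XLA-side references; functional variants re-wrap
// each tuple element as a new XLA tensor on self's device.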
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::gels_out(at::Tensor & X, at::Tensor & qr, const at::Tensor & self, const at::Tensor & A) const { | |
auto w_X = bridge::XlaToAtenMutableTensor(X); | |
auto w_qr = bridge::XlaToAtenMutableTensor(qr); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_A = bridge::XlaToAtenTensor(A); | |
auto&& x_result = at::gels_out(w_X, w_qr, r_self, r_A); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(X, qr); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::gels(const at::Tensor & self, const at::Tensor & A) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_A = bridge::XlaToAtenTensor(A); | |
auto&& x_result = at::gels(r_self, r_A); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::trtrs_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
auto w_X = bridge::XlaToAtenMutableTensor(X); | |
auto w_M = bridge::XlaToAtenMutableTensor(M); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_A = bridge::XlaToAtenTensor(A); | |
auto&& x_result = at::trtrs_out(w_X, w_M, r_self, r_A, upper, transpose, unitriangular); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(X, M); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::trtrs(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_A = bridge::XlaToAtenTensor(A); | |
auto&& x_result = at::trtrs(r_self, r_A, upper, transpose, unitriangular); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::symeig_out(at::Tensor & e, at::Tensor & V, const at::Tensor & self, bool eigenvectors, bool upper) const { | |
auto w_e = bridge::XlaToAtenMutableTensor(e); | |
auto w_V = bridge::XlaToAtenMutableTensor(V); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::symeig_out(w_e, w_V, r_self, eigenvectors, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(e, V); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::symeig(const at::Tensor & self, bool eigenvectors, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::symeig(r_self, eigenvectors, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::eig_out(at::Tensor & e, at::Tensor & v, const at::Tensor & self, bool eigenvectors) const { | |
auto w_e = bridge::XlaToAtenMutableTensor(e); | |
auto w_v = bridge::XlaToAtenMutableTensor(v); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::eig_out(w_e, w_v, r_self, eigenvectors); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(e, v); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::eig(const at::Tensor & self, bool eigenvectors) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::eig(r_self, eigenvectors); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some, bool compute_uv) const { | |
auto w_U = bridge::XlaToAtenMutableTensor(U); | |
auto w_S = bridge::XlaToAtenMutableTensor(S); | |
auto w_V = bridge::XlaToAtenMutableTensor(V); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::svd_out(w_U, w_S, w_V, r_self, some, compute_uv); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, V); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::svd(const at::Tensor & self, bool some, bool compute_uv) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::svd(r_self, some, compute_uv); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::cholesky_out(at::Tensor & result, const at::Tensor & self, bool upper) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::cholesky_out(w_result, r_self, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::cholesky(const at::Tensor & self, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::cholesky(r_self, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_cholesky_helper(const at::Tensor & self, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::_cholesky_helper(r_self, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::cholesky_solve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, bool upper) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto&& x_result = at::cholesky_solve_out(w_result, r_self, r_input2, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto&& x_result = at::cholesky_solve(r_self, r_input2, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::_cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_A = bridge::XlaToAtenTensor(A); | |
auto&& x_result = at::_cholesky_solve_helper(r_self, r_A, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::potri_out(at::Tensor & result, const at::Tensor & self, bool upper) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::potri_out(w_result, r_self, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::potri(const at::Tensor & self, bool upper) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::potri(r_self, upper); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::pstrf_out(at::Tensor & u, at::Tensor & piv, const at::Tensor & self, bool upper, at::Scalar tol) const { | |
auto w_u = bridge::XlaToAtenMutableTensor(u); | |
auto w_piv = bridge::XlaToAtenMutableTensor(piv); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::pstrf_out(w_u, w_piv, r_self, upper, tol); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(u, piv); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::pstrf(const at::Tensor & self, bool upper, at::Scalar tol) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::pstrf(r_self, upper, tol); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self) const { | |
auto w_Q = bridge::XlaToAtenMutableTensor(Q); | |
auto w_R = bridge::XlaToAtenMutableTensor(R); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::qr_out(w_Q, w_R, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(Q, R); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::qr(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::qr(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::geqrf_out(at::Tensor & result0, at::Tensor & result1, const at::Tensor & self) const { | |
auto w_result0 = bridge::XlaToAtenMutableTensor(result0); | |
auto w_result1 = bridge::XlaToAtenMutableTensor(result1); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::geqrf_out(w_result0, w_result1, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(result0, result1); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::geqrf(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::geqrf(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::orgqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto&& x_result = at::orgqr_out(w_result, r_self, r_input2); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::orgqr(const at::Tensor & self, const at::Tensor & input2) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto&& x_result = at::orgqr(r_self, r_input2); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::ormqr_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto r_input3 = bridge::XlaToAtenTensor(input3); | |
auto&& x_result = at::ormqr_out(w_result, r_self, r_input2, r_input3, left, transpose); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_input2 = bridge::XlaToAtenTensor(input2); | |
auto r_input3 = bridge::XlaToAtenTensor(input3); | |
auto&& x_result = at::ormqr(r_self, r_input2, r_input3, left, transpose); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
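// Batched LU: btrifact (factorization), btrifact_with_info | |
// (factorization plus per-batch info codes), and btrisolve (solve | |
// against a prior factorization). The _out variants return the | |
// caller's XLA tensors; the functional variants wrap every tuple | |
// element onto the device of `self`. | |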
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::btrifact_out(at::Tensor & A_LU, at::Tensor & pivots, const at::Tensor & self, bool pivot) const { | |
auto w_A_LU = bridge::XlaToAtenMutableTensor(A_LU); | |
auto w_pivots = bridge::XlaToAtenMutableTensor(pivots); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::btrifact_out(w_A_LU, w_pivots, r_self, pivot); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(A_LU, pivots); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::btrifact(const at::Tensor & self, bool pivot) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::btrifact(r_self, pivot); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::btrifact_with_info_out(at::Tensor & A_LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool pivot) const { | |
auto w_A_LU = bridge::XlaToAtenMutableTensor(A_LU); | |
auto w_pivots = bridge::XlaToAtenMutableTensor(pivots); | |
auto w_info = bridge::XlaToAtenMutableTensor(info); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::btrifact_with_info_out(w_A_LU, w_pivots, w_info, r_self, pivot); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(A_LU, pivots, info); | |
} | |
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::btrifact_with_info(const at::Tensor & self, bool pivot) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::btrifact_with_info(r_self, pivot); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::btrisolve_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_LU_data = bridge::XlaToAtenTensor(LU_data); | |
auto r_LU_pivots = bridge::XlaToAtenTensor(LU_pivots); | |
auto&& x_result = at::btrisolve_out(w_result, r_self, r_LU_data, r_LU_pivots); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::btrisolve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_LU_data = bridge::XlaToAtenTensor(LU_data); | |
auto r_LU_pivots = bridge::XlaToAtenTensor(LU_pivots); | |
auto&& x_result = at::btrisolve(r_self, r_LU_data, r_LU_pivots); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
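// Sampling ops (multinomial here, the normal overloads further down) | |
// pass the caller's at::Generator* straight through to the CPU | |
// fallback; a null generator resolves to ATen's default CPU generator, | |
// so randomness on this path is host-side. | |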
at::Tensor & XLATypeBase::multinomial_out(at::Tensor & result, const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::multinomial_out(w_result, r_self, num_samples, replacement, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::multinomial(const at::Tensor & self, int64_t num_samples, bool replacement, at::Generator * generator) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::multinomial(r_self, num_samples, replacement, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::lgamma_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::lgamma_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::lgamma(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::lgamma(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::digamma_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::digamma_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::digamma(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::digamma(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::polygamma_out(at::Tensor & result, int64_t n, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::polygamma_out(w_result, n, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::polygamma(int64_t n, const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::polygamma(n, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::erfinv_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::erfinv_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::erfinv(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::erfinv(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::frac_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::frac_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::frac(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::frac(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::dist(const at::Tensor & self, const at::Tensor & other, at::Scalar p) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::dist(r_self, r_other, p); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::reciprocal_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::reciprocal_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::reciprocal(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::reciprocal(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::neg_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::neg_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::neg(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::neg(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::atan2_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::atan2_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::atan2(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::atan2(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::lerp_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_end = bridge::XlaToAtenTensor(end); | |
auto&& x_result = at::lerp_out(w_result, r_self, r_end, weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::lerp(const at::Tensor & self, const at::Tensor & end, at::Scalar weight) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_end = bridge::XlaToAtenTensor(end); | |
auto&& x_result = at::lerp(r_self, r_end, weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::histc_out(at::Tensor & result, const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::histc_out(w_result, r_self, bins, min, max); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::histc(const at::Tensor & self, int64_t bins, at::Scalar min, at::Scalar max) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::histc(r_self, bins, min, max); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::sign_out(at::Tensor & result, const at::Tensor & self) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sign_out(w_result, r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::sign(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sign(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::fmod_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::fmod_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::fmod(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::fmod(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::fmod_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::fmod_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::fmod(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::fmod(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::remainder_out(at::Tensor & result, const at::Tensor & self, at::Scalar other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::remainder_out(w_result, r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::remainder(const at::Tensor & self, at::Scalar other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::remainder(r_self, other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::remainder_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::remainder_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::remainder(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::remainder(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::min_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::min_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::min(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::min(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::min(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::min(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::max_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & other) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::max_out(w_result, r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::max(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::max(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::max(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::max(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::median(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::median(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
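// sort and topk return (values, indices) pairs; the _out overloads | |
// hand back references to the caller-provided XLA tensors, while the | |
// functional overloads wrap both tuple elements onto the device of | |
// `self`. | |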
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool descending) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sort_out(w_values, w_indices, r_self, dim, descending); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::sort(const at::Tensor & self, int64_t dim, bool descending) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::sort(r_self, dim, descending); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
auto w_values = bridge::XlaToAtenMutableTensor(values); | |
auto w_indices = bridge::XlaToAtenMutableTensor(indices); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::topk_out(w_values, w_indices, r_self, k, dim, largest, sorted); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(values, indices); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::topk(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::topk(r_self, k, dim, largest, sorted); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor XLATypeBase::all(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::all(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::any(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::any(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::renorm_out(at::Tensor & result, const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::renorm_out(w_result, r_self, p, dim, maxnorm); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::renorm(const at::Tensor & self, at::Scalar p, int64_t dim, at::Scalar maxnorm) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::renorm(r_self, p, dim, maxnorm); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor XLATypeBase::unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
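// unfold dispatches through at::native:: rather than a free at:: | |
// function -- presumably because unfold is method-only in this ATen | |
// version, so the generator special-cases it. | |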
auto&& x_result = at::native::unfold(r_self, dimension, size, step); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
bool XLATypeBase::equal(const at::Tensor & self, const at::Tensor & other) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_other = bridge::XlaToAtenTensor(other); | |
auto&& x_result = at::equal(r_self, r_other); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return x_result; | |
} | |
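// equal is unusual in this stretch in returning a plain bool, so there | |
// is nothing to wrap; comparing two XLA tensors this way first | |
// materializes both as ATen tensors. | |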
at::Tensor & XLATypeBase::pow_out(at::Tensor & result, const at::Tensor & self, const at::Tensor & exponent) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_exponent = bridge::XlaToAtenTensor(exponent); | |
auto&& x_result = at::pow_out(w_result, r_self, r_exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::pow(const at::Tensor & self, const at::Tensor & exponent) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_exponent = bridge::XlaToAtenTensor(exponent); | |
auto&& x_result = at::pow(r_self, r_exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::pow_out(at::Tensor & result, at::Scalar self, const at::Tensor & exponent) const { | |
auto w_result = bridge::XlaToAtenMutableTensor(result); | |
auto r_exponent = bridge::XlaToAtenTensor(exponent); | |
auto&& x_result = at::pow_out(w_result, self, r_exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return result; | |
} | |
at::Tensor XLATypeBase::pow(at::Scalar self, const at::Tensor & exponent) const { | |
auto r_exponent = bridge::XlaToAtenTensor(exponent); | |
auto&& x_result = at::pow(self, r_exponent); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(exponent)); | |
} | |
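// pow(Scalar, Tensor) has no tensor-valued self, so the output device | |
// is taken from exponent. The normal overloads below apply the same | |
// rule: the device comes from a tensor argument (std, or mean when std | |
// is a plain double). | |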
at::Tensor & XLATypeBase::normal_out(at::Tensor & output, const at::Tensor & mean, double std, at::Generator * generator) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_mean = bridge::XlaToAtenTensor(mean); | |
auto&& x_result = at::normal_out(w_output, r_mean, std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::normal(const at::Tensor & mean, double std, at::Generator * generator) const { | |
auto r_mean = bridge::XlaToAtenTensor(mean); | |
auto&& x_result = at::normal(r_mean, std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(mean)); | |
} | |
at::Tensor & XLATypeBase::normal_out(at::Tensor & output, double mean, const at::Tensor & std, at::Generator * generator) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_std = bridge::XlaToAtenTensor(std); | |
auto&& x_result = at::normal_out(w_output, mean, r_std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::normal(double mean, const at::Tensor & std, at::Generator * generator) const { | |
auto r_std = bridge::XlaToAtenTensor(std); | |
auto&& x_result = at::normal(mean, r_std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(std)); | |
} | |
at::Tensor & XLATypeBase::normal_out(at::Tensor & output, const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_mean = bridge::XlaToAtenTensor(mean); | |
auto r_std = bridge::XlaToAtenTensor(std); | |
auto&& x_result = at::normal_out(w_output, r_mean, r_std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::normal(const at::Tensor & mean, const at::Tensor & std, at::Generator * generator) const { | |
auto r_mean = bridge::XlaToAtenTensor(mean); | |
auto r_std = bridge::XlaToAtenTensor(std); | |
auto&& x_result = at::normal(r_mean, r_std, generator); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(std)); | |
} | |
at::Tensor XLATypeBase::alias(const at::Tensor & self) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::alias(r_self); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::_dirichlet_grad_out(at::Tensor & output, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_x = bridge::XlaToAtenTensor(x); | |
auto r_alpha = bridge::XlaToAtenTensor(alpha); | |
auto r_total = bridge::XlaToAtenTensor(total); | |
auto&& x_result = at::_dirichlet_grad_out(w_output, r_x, r_alpha, r_total); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::_dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) const { | |
auto r_x = bridge::XlaToAtenTensor(x); | |
auto r_alpha = bridge::XlaToAtenTensor(alpha); | |
auto r_total = bridge::XlaToAtenTensor(total); | |
auto&& x_result = at::_dirichlet_grad(r_x, r_alpha, r_total); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(total)); | |
} | |
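// Loss ops begin here and follow a fixed template: {op}_out, {op}, | |
// {op}_backward_out, {op}_backward, with the nll/multilabel variants | |
// adding a _forward pair that also yields auxiliary state | |
// (total_weight, is_target). Callers never invoke these overrides | |
// directly; type-based dispatch routes to them. A sketch (assuming x | |
// and t already live on an XLA device, and Reduction::Mean == 1 as in | |
// ATen of this vintage): | |
// | |
//   at::Tensor x = ..., t = ...; | |
//   at::Tensor loss = at::mse_loss(x, t, /*reduction=*/1); | |
//   // -> XLATypeBase::mse_loss -> at::mse_loss on CPU -> XLA rewrap | |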
at::Tensor & XLATypeBase::binary_cross_entropy_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::binary_cross_entropy_out(w_output, r_self, r_target, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::binary_cross_entropy(r_self, r_target, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::binary_cross_entropy_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::binary_cross_entropy_backward(r_grad_output, r_self, r_target, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::mse_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::mse_loss_out(w_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::mse_loss(r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::mse_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::mse_loss_backward(r_grad_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::l1_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::l1_loss_out(w_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::l1_loss(r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::l1_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::l1_loss_backward(r_grad_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::multi_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::multi_margin_loss_out(w_output, r_self, r_target, p, margin, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::multi_margin_loss(const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::multi_margin_loss(r_self, r_target, p, margin, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::multi_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, p, margin, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, at::Scalar p, at::Scalar margin, const at::Tensor & weight, int64_t reduction) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::multi_margin_loss_backward(r_grad_output, r_self, r_target, p, margin, r_weight, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::multilabel_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::multilabel_margin_loss_out(w_output, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::multilabel_margin_loss(r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_is_target = bridge::XlaToAtenMutableTensor(is_target); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::multilabel_margin_loss_forward_out(w_output, w_is_target, r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, is_target); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto&& x_result = at::multilabel_margin_loss_forward(r_self, r_target, reduction); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_is_target = bridge::XlaToAtenTensor(is_target); | |
auto&& x_result = at::multilabel_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction, r_is_target); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_is_target = bridge::XlaToAtenTensor(is_target); | |
auto&& x_result = at::multilabel_margin_loss_backward(r_grad_output, r_self, r_target, reduction, r_is_target); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::nll_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss_out(w_output, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::nll_loss(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss(r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_total_weight = bridge::XlaToAtenMutableTensor(total_weight); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss_forward_out(w_output, w_total_weight, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, total_weight); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss_forward(r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::nll_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::nll_loss_backward(r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
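// nll_loss2d repeats the six-function nll_loss set above for spatial | |
// (N, C, H, W) inputs; only the underlying kernel changes. | |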
at::Tensor & XLATypeBase::nll_loss2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss2d_out(w_output, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::nll_loss2d(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss2d(r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto w_total_weight = bridge::XlaToAtenMutableTensor(total_weight); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss2d_forward_out(w_output, w_total_weight, r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor &,at::Tensor &>(output, total_weight); | |
} | |
std::tuple<at::Tensor,at::Tensor> XLATypeBase::nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto&& x_result = at::nll_loss2d_forward(r_self, r_target, r_weight, reduction, ignore_index); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self))); | |
} | |
at::Tensor & XLATypeBase::nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::nll_loss2d_backward_out(w_grad_input, r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Tensor & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) const { | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto r_target = bridge::XlaToAtenTensor(target); | |
auto r_weight = bridge::XlaToAtenTensor(weight); | |
auto r_total_weight = bridge::XlaToAtenTensor(total_weight); | |
auto&& x_result = at::nll_loss2d_backward(r_grad_output, r_self, r_target, r_weight, reduction, ignore_index, r_total_weight); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::smooth_l1_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::smooth_l1_loss_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::smooth_l1_loss(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::smooth_l1_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::smooth_l1_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
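// Soft margin loss wrappers (forward, backward, and their _out variants).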
at::Tensor & XLATypeBase::soft_margin_loss_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::soft_margin_loss_out(w_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::soft_margin_loss(r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::soft_margin_loss_backward_out(w_grad_input, r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_target = bridge::XlaToAtenTensor(target);
  auto&& x_result = at::soft_margin_loss_backward(r_grad_output, r_self, r_target, reduction);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
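// ELU activation wrappers, including the in-place elu_ variant. Note that
// elu_backward takes the forward output (not self) and places its result on
// that tensor's device.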
at::Tensor & XLATypeBase::elu_out(at::Tensor & output, const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::elu_out(w_output, r_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::elu(const at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::elu(r_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::elu_backward_out(w_grad_input, r_grad_output, alpha, scale, input_scale, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::elu_backward(const at::Tensor & grad_output, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::elu_backward(r_grad_output, alpha, scale, input_scale, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output));
}
at::Tensor & XLATypeBase::elu_(at::Tensor & self, at::Scalar alpha, at::Scalar scale, at::Scalar input_scale) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::elu_(w_self, alpha, scale, input_scale);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
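// GLU (gated linear unit) wrappers.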
at::Tensor & XLATypeBase::glu_out(at::Tensor & output, const at::Tensor & self, int64_t dim) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::glu_out(w_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::glu(const at::Tensor & self, int64_t dim) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::glu(r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::glu_backward_out(w_grad_input, r_grad_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::glu_backward(r_grad_output, r_self, dim);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
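// Hardtanh wrappers, including the in-place hardtanh_ variant.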
at::Tensor & XLATypeBase::hardtanh_out(at::Tensor & output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardtanh_out(w_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::hardtanh(const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardtanh(r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardtanh_backward_out(w_grad_input, r_grad_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::hardtanh_backward(r_grad_output, r_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::hardtanh_(at::Tensor & self, at::Scalar min_val, at::Scalar max_val) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::hardtanh_(w_self, min_val, max_val);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
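// Leaky ReLU wrappers, including the in-place leaky_relu_ variant.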
at::Tensor & XLATypeBase::leaky_relu_out(at::Tensor & output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::leaky_relu_out(w_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::leaky_relu(const at::Tensor & self, at::Scalar negative_slope) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::leaky_relu(r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::leaky_relu_backward_out(w_grad_input, r_grad_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar negative_slope) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::leaky_relu_backward(r_grad_output, r_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::leaky_relu_(at::Tensor & self, at::Scalar negative_slope) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto&& x_result = at::leaky_relu_(w_self, negative_slope);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
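// Log-sigmoid wrappers. The forward op returns an extra buffer tensor, which
// log_sigmoid_backward consumes alongside grad_output and self.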
at::Tensor & XLATypeBase::log_sigmoid_out(at::Tensor & output, const at::Tensor & self) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_sigmoid_out(w_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::log_sigmoid(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_sigmoid(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_buffer = bridge::XlaToAtenMutableTensor(buffer);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_sigmoid_forward_out(w_output, w_buffer, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, buffer);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::log_sigmoid_forward(const at::Tensor & self) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::log_sigmoid_forward(r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_buffer = bridge::XlaToAtenTensor(buffer);
  auto&& x_result = at::log_sigmoid_backward_out(w_grad_input, r_grad_output, r_self, r_buffer);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_buffer = bridge::XlaToAtenTensor(buffer);
  auto&& x_result = at::log_sigmoid_backward(r_grad_output, r_self, r_buffer);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
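// RReLU-with-noise wrappers. The forward and in-place variants take an
// at::Generator for the randomized slopes and pass it through unchanged.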
at::Tensor & XLATypeBase::rrelu_with_noise_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::rrelu_with_noise_out(w_output, r_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::rrelu_with_noise(r_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::rrelu_with_noise_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::rrelu_with_noise_backward_out(w_grad_input, r_grad_output, r_self, r_noise, lower, upper, training);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::rrelu_with_noise_backward(r_grad_output, r_self, r_noise, lower, upper, training);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, at::Scalar lower, at::Scalar upper, bool training, at::Generator * generator) const {
  auto w_self = bridge::XlaToAtenMutableTensor(self);
  auto r_noise = bridge::XlaToAtenTensor(noise);
  auto&& x_result = at::rrelu_with_noise_(w_self, r_noise, lower, upper, training, generator);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return self;
}
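// Softplus wrappers. The backward op additionally consumes the forward output.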
at::Tensor & XLATypeBase::softplus_out(at::Tensor & output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softplus_out(w_output, r_self, beta, threshold);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::softplus(const at::Tensor & self, at::Scalar beta, at::Scalar threshold) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softplus(r_self, beta, threshold);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::softplus_backward_out(w_grad_input, r_grad_output, r_self, beta, threshold, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar beta, at::Scalar threshold, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::softplus_backward(r_grad_output, r_self, beta, threshold, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
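// Softshrink wrappers.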
at::Tensor & XLATypeBase::softshrink_out(at::Tensor & output, const at::Tensor & self, at::Scalar lambd) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softshrink_out(w_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::softshrink(const at::Tensor & self, at::Scalar lambd) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softshrink(r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softshrink_backward_out(w_grad_input, r_grad_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, at::Scalar lambd) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::softshrink_backward(r_grad_output, r_self, lambd);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
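// Adaptive average pooling wrappers (2-D and 3-D).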
at::Tensor & XLATypeBase::adaptive_avg_pool2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool2d_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::adaptive_avg_pool2d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool2d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::adaptive_avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool2d_backward_out(w_grad_input, r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool2d_backward(r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::adaptive_avg_pool3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool3d_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::adaptive_avg_pool3d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool3d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool3d_backward_out(w_grad_input, r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_avg_pool3d_backward(r_grad_output, r_self);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
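// Adaptive max pooling wrappers (2-D and 3-D). Forward returns both values
// and indices; backward consumes the indices.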
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::adaptive_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_max_pool2d_out(w_output, w_indices, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::adaptive_max_pool2d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_max_pool2d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::adaptive_max_pool2d_backward_out(w_grad_input, r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::adaptive_max_pool2d_backward(r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::adaptive_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_max_pool3d_out(w_output, w_indices, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::adaptive_max_pool3d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::adaptive_max_pool3d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::adaptive_max_pool3d_backward_out(w_grad_input, r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::adaptive_max_pool3d_backward(r_grad_output, r_self, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
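// Average pooling wrappers (2-D and 3-D).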
at::Tensor & XLATypeBase::avg_pool2d_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool2d_out(w_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::avg_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool2d(r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool2d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool2d_backward(r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::avg_pool3d_out(at::Tensor & output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool3d_out(w_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::avg_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool3d(r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool3d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, bool ceil_mode, bool count_include_pad) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::avg_pool3d_backward(r_grad_output, r_self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
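// Fractional max pooling wrappers (2-D and 3-D). Forward consumes the
// caller-provided random_samples tensor; backward consumes the indices.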
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_random_samples = bridge::XlaToAtenTensor(random_samples);
  auto&& x_result = at::fractional_max_pool2d_out(w_output, w_indices, r_self, kernel_size, output_size, r_random_samples);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::fractional_max_pool2d(const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_random_samples = bridge::XlaToAtenTensor(random_samples);
  auto&& x_result = at::fractional_max_pool2d(r_self, kernel_size, output_size, r_random_samples);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::fractional_max_pool2d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, output_size, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::fractional_max_pool2d_backward(r_grad_output, r_self, kernel_size, output_size, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_random_samples = bridge::XlaToAtenTensor(random_samples);
  auto&& x_result = at::fractional_max_pool3d_out(w_output, w_indices, r_self, kernel_size, output_size, r_random_samples);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::fractional_max_pool3d(const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & random_samples) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_random_samples = bridge::XlaToAtenTensor(random_samples);
  auto&& x_result = at::fractional_max_pool3d(r_self, kernel_size, output_size, r_random_samples);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::fractional_max_pool3d_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, output_size, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList output_size, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::fractional_max_pool3d_backward(r_grad_output, r_self, kernel_size, output_size, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
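// Max pooling with indices wrappers (2-D and 3-D).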
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::max_pool2d_with_indices_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool2d_with_indices_out(w_output, w_indices, r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::max_pool2d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool2d_with_indices(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_pool2d_with_indices_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_pool2d_with_indices_backward(r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::max_pool3d_with_indices_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_indices = bridge::XlaToAtenMutableTensor(indices);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool3d_with_indices_out(w_output, w_indices, r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(output, indices);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::max_pool3d_with_indices(const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::max_pool3d_with_indices(r_self, kernel_size, stride, padding, dilation, ceil_mode);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
at::Tensor & XLATypeBase::max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_pool3d_with_indices_backward_out(w_grad_input, r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, bool ceil_mode, const at::Tensor & indices) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_pool3d_with_indices_backward(r_grad_output, r_self, kernel_size, stride, padding, dilation, ceil_mode, r_indices);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
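// Max unpooling wrappers (2-D and 3-D); the 3-D variant also takes stride and
// padding.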
at::Tensor & XLATypeBase::max_unpool2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool2d_out(w_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool2d(r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::max_unpool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool2d_backward_out(w_grad_input, r_grad_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::max_unpool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool2d_backward(r_grad_output, r_self, r_indices, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::max_unpool3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool3d_out(w_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool3d(r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::max_unpool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool3d_backward_out(w_grad_input, r_grad_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::max_unpool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::IntList output_size, at::IntList stride, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_indices = bridge::XlaToAtenTensor(indices);
  auto&& x_result = at::max_unpool3d_backward(r_grad_output, r_self, r_indices, output_size, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
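// Reflection padding wrappers (1-D and 2-D).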
at::Tensor & XLATypeBase::reflection_pad1d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad1d_out(w_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::reflection_pad1d(const at::Tensor & self, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad1d(r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad1d_backward_out(w_grad_input, r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad1d_backward(r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::reflection_pad2d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad2d_out(w_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::reflection_pad2d(const at::Tensor & self, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad2d(r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad2d_backward_out(w_grad_input, r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::reflection_pad2d_backward(r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
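// Replication (edge) padding wrappers (1-D, 2-D and 3-D).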
at::Tensor & XLATypeBase::replication_pad1d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const { | |
auto w_output = bridge::XlaToAtenMutableTensor(output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::replication_pad1d_out(w_output, r_self, padding); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return output; | |
} | |
at::Tensor XLATypeBase::replication_pad1d(const at::Tensor & self, at::IntList padding) const { | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::replication_pad1d(r_self, padding); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self)); | |
} | |
at::Tensor & XLATypeBase::replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const { | |
auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input); | |
auto r_grad_output = bridge::XlaToAtenTensor(grad_output); | |
auto r_self = bridge::XlaToAtenTensor(self); | |
auto&& x_result = at::replication_pad1d_backward_out(w_grad_input, r_grad_output, r_self, padding); | |
static_cast<void>(x_result); // Avoid warnings in case not used | |
return grad_input; | |
} | |
at::Tensor XLATypeBase::replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad1d_backward(r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::replication_pad2d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad2d_out(w_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::replication_pad2d(const at::Tensor & self, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad2d(r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad2d_backward_out(w_grad_input, r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad2d_backward(r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::replication_pad3d_out(at::Tensor & output, const at::Tensor & self, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad3d_out(w_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::replication_pad3d(const at::Tensor & self, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad3d(r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad3d_backward_out(w_grad_input, r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntList padding) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::replication_pad3d_backward(r_grad_output, r_self, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
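// Upsampling wrappers. The interpolating variants (linear1d, bilinear2d,
// bicubic2d, trilinear3d) take an align_corners flag. Their backward
// overloads receive only grad_output plus the output/input sizes, so the
// result is placed on grad_output's device rather than self's.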
at::Tensor & XLATypeBase::upsample_linear1d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_linear1d_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_linear1d(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_linear1d(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_linear1d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_linear1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_linear1d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::upsample_bilinear2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_bilinear2d_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_bilinear2d(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_bilinear2d(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_bilinear2d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_bilinear2d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::upsample_bicubic2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_bicubic2d_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_bicubic2d(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_bicubic2d(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_bicubic2d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_bicubic2d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::upsample_trilinear3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_trilinear3d_out(w_output, r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_trilinear3d(const at::Tensor & self, at::IntList output_size, bool align_corners) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_trilinear3d(r_self, output_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_trilinear3d_backward_out(w_grad_input, r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size, bool align_corners) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_trilinear3d_backward(r_grad_output, output_size, input_size, align_corners);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
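// Nearest-neighbor upsampling has no align_corners parameter, so these
// wrappers carry only output_size (plus input_size in the backward).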
at::Tensor & XLATypeBase::upsample_nearest1d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest1d_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_nearest1d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest1d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest1d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest1d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::upsample_nearest2d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest2d_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_nearest2d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest2d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest2d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest2d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
at::Tensor & XLATypeBase::upsample_nearest3d_out(at::Tensor & output, const at::Tensor & self, at::IntList output_size) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest3d_out(w_output, r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::upsample_nearest3d(const at::Tensor & self, at::IntList output_size) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto&& x_result = at::upsample_nearest3d(r_self, output_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest3d_backward_out(w_grad_input, r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntList output_size, at::IntList input_size) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto&& x_result = at::upsample_nearest3d_backward(r_grad_output, output_size, input_size);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(grad_output));
}
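// Elementwise activation backward passes. at::sigmoid_backward computes
// grad_output * output * (1 - output) and at::tanh_backward computes
// grad_output * (1 - output^2), both from the saved forward output.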
at::Tensor & XLATypeBase::sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::sigmoid_backward_out(w_grad_input, r_grad_output, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::sigmoid_backward(r_grad_output, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output));
}
at::Tensor & XLATypeBase::tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::tanh_backward_out(w_grad_input, r_grad_output, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return grad_input;
}
at::Tensor XLATypeBase::tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_output = bridge::XlaToAtenTensor(output);
  auto&& x_result = at::tanh_backward(r_grad_output, r_output);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(output));
}
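// THNN transposed-convolution wrappers. The _forward_out variant also
// returns the columns/ones scratch buffers, which the backward overloads
// take back as inputs; output_mask selects which of grad_input, grad_weight
// and grad_bias the backward actually computes.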
at::Tensor & XLATypeBase::thnn_conv_transpose2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose2d_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose2d(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv_transpose2d_forward_out(at::Tensor & output, at::Tensor & columns, at::Tensor & ones, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_columns = bridge::XlaToAtenMutableTensor(columns);
  auto w_ones = bridge::XlaToAtenMutableTensor(ones);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose2d_forward_out(w_output, w_columns, w_ones, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, columns, ones);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv_transpose2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv_transpose2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::thnn_conv_transpose2d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_columns, r_ones);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv_transpose2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & columns, const at::Tensor & ones, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_columns = bridge::XlaToAtenTensor(columns);
  auto r_ones = bridge::XlaToAtenTensor(ones);
  auto&& x_result = at::thnn_conv_transpose2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_columns, r_ones, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
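// Same pattern for the 3-D transposed convolution; here the scratch buffers
// are named finput/fgrad_input instead of columns/ones.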
at::Tensor & XLATypeBase::thnn_conv_transpose3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose3d_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose3d(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv_transpose3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose3d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv_transpose3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_transpose3d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, output_padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv_transpose3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv_transpose3d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv_transpose3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList output_padding, at::IntList dilation, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv_transpose3d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, output_padding, dilation, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
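// Plain THNN 2-D convolution wrappers, again threading the finput and
// fgrad_input buffers from the forward into the backward.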
at::Tensor & XLATypeBase::thnn_conv2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv2d_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv2d(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv2d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv2d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv2d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
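// Depthwise 2-D convolution. Note the backward returns only grad_input and
// grad_weight (a 2-element output_mask): this kernel does not produce a
// bias gradient.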
at::Tensor & XLATypeBase::thnn_conv_depthwise2d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_depthwise2d_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_depthwise2d(r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
at::Tensor & XLATypeBase::thnn_conv_depthwise2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_depthwise2d_forward_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv_depthwise2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv_depthwise2d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv_depthwise2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::thnn_conv_depthwise2d_backward_out(w_grad_input, w_grad_weight, r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &>(grad_input, grad_weight);
}
std::tuple<at::Tensor,at::Tensor> XLATypeBase::thnn_conv_depthwise2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, at::IntList dilation, std::array<bool,2> output_mask) const {
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto&& x_result = at::thnn_conv_depthwise2d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, dilation, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)));
}
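// THNN 3-D convolution wrappers, structurally identical to the 2-D ones.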
at::Tensor & XLATypeBase::thnn_conv3d_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv3d_out(w_output, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return output;
}
at::Tensor XLATypeBase::thnn_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv3d(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return bridge::CreateXlaTensor(x_result, bridge::XlaTensorDevice(self));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv3d_forward_out(at::Tensor & output, at::Tensor & finput, at::Tensor & fgrad_input, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto w_output = bridge::XlaToAtenMutableTensor(output);
  auto w_finput = bridge::XlaToAtenMutableTensor(finput);
  auto w_fgrad_input = bridge::XlaToAtenMutableTensor(fgrad_input);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv3d_forward_out(w_output, w_finput, w_fgrad_input, r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(output, finput, fgrad_input);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, const at::Tensor & bias, at::IntList stride, at::IntList padding) const {
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_bias = bridge::XlaToAtenTensor(bias);
  auto&& x_result = at::thnn_conv3d_forward(r_self, r_weight, kernel_size, r_bias, stride, padding);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}
std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> XLATypeBase::thnn_conv3d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input) const {
  auto w_grad_input = bridge::XlaToAtenMutableTensor(grad_input);
  auto w_grad_weight = bridge::XlaToAtenMutableTensor(grad_weight);
  auto w_grad_bias = bridge::XlaToAtenMutableTensor(grad_bias);
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv3d_backward_out(w_grad_input, w_grad_weight, w_grad_bias, r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(grad_input, grad_weight, grad_bias);
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> XLATypeBase::thnn_conv3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntList kernel_size, at::IntList stride, at::IntList padding, const at::Tensor & finput, const at::Tensor & fgrad_input, std::array<bool,3> output_mask) const {
  // NOTE: the source is truncated at this point; the signature tail and body
  // below are reconstructed from the generator's uniform pattern (compare
  // thnn_conv2d_backward above) and should be treated as a best guess.
  auto r_grad_output = bridge::XlaToAtenTensor(grad_output);
  auto r_self = bridge::XlaToAtenTensor(self);
  auto r_weight = bridge::XlaToAtenTensor(weight);
  auto r_finput = bridge::XlaToAtenTensor(finput);
  auto r_fgrad_input = bridge::XlaToAtenTensor(fgrad_input);
  auto&& x_result = at::thnn_conv3d_backward(r_grad_output, r_self, r_weight, kernel_size, stride, padding, r_finput, r_fgrad_input, output_mask);
  static_cast<void>(x_result); // Avoid warnings in case not used
  return std::tuple<at::Tensor,at::Tensor,at::Tensor>(bridge::CreateXlaTensor(std::get<0>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<1>(x_result), bridge::XlaTensorDevice(self)), bridge::CreateXlaTensor(std::get<2>(x_result), bridge::XlaTensorDevice(self)));
}