// @generated from tools/autograd/templates/python_variable_methods.cpp
#include <Python.h>
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/generated/VariableType.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/utils/python_error_messages.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/jit/tracer.h"
#ifdef USE_CUDA
#include "torch/csrc/cuda/Stream.h"
#include "torch/csrc/cuda/Event.h"
#endif
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/utils/object_ptr.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/python_numbers.h"
#include "torch/csrc/utils/python_strings.h"
#include "torch/csrc/utils/python_tuples.h"
#include "torch/csrc/utils/tensor_apply.h"
#include "torch/csrc/utils/tensor_list.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/utils/tensor_types.h"
#include "torch/csrc/utils/structseq.h"
#include <ATen/ATen.h>
#include "c10/util/Optional.h"
#include "python_variable_methods_dispatch.h"
#include <stdexcept>
using at::DeviceGuard;
using at::device_of;
using at::OptionalDeviceGuard;
using at::Backend;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using namespace torch::autograd::utils;
namespace torch { namespace autograd {
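// The methods below share a common structure: unwrap the THPVariable to get the
// underlying at::Tensor (`cdata`), parse Python arguments with PythonArgParser
// where needed, call into ATen (usually through a GIL-free dispatch_* helper),
// and convert the result back to a Python object with wrap()/THPVariable_Wrap.
// Errors are translated into Python exceptions by the
// HANDLE_TH_ERRORS/END_HANDLE_TH_ERRORS macros.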
static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (self_.is_view()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (self_.requires_grad()) {
throw std::runtime_error(
"Can't call apply_() on Variable that requires grad. Use "
"var.detach().apply_() instead.");
}
return THPVariable_Wrap(torch::utils::apply_(self_, arg));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"size(int64_t dim)",
"size()",
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (jit::tracer::isTracing()) {
return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
} else {
return wrap(self_.size(r.toInt64(0)));
}
} else if (r.idx == 1) {
// we can't do the normal wrapping here because IntArrayRef maps to both
// torch.Size and tuple in python.
return THPSize_New(self_);
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"stride(int64_t dim)",
"stride()",
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(self_.stride(r.toInt64(0)));
} else if (r.idx == 1) {
// yes, this is called strides in ATen.
IntArrayRef strides = self_.strides();
// we can't do the normal wrapping here because IntArrayRef maps to both
// torch.Size and tuple in python
return THPUtils_packInt64Array(strides.size(), strides.data());
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(self.get_device());
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(self.storage_offset());
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return THPUtils_packInt64(self_.dim());
END_HANDLE_TH_ERRORS
}
static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.contiguous(memory_format);
}
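// dispatch_* helpers such as the one above release the GIL (AutoNoGIL) and switch
// to self's device (OptionalDeviceGuard) before calling into ATen, so the actual
// kernel runs without holding the Python interpreter lock.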
static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"contiguous(*, MemoryFormat memory_format=contiguous_format)",
});
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
auto memory_format = r.toMemoryFormat(0);
// avoids touching the GIL or current device if self is already contiguous
if (self_.is_contiguous(memory_format)) {
// NOTE: this logic is duplicated from VariableType.cpp. Since we need to
// record this call to contiguous() in the trace regardless of whether
// we actually call contiguous here, we need to record this information
// manually.
if (jit::tracer::isTracing()) {
auto tracer_state = jit::tracer::getTracingState();
auto node = tracer_state->graph->create(jit::aten::contiguous, /*num_outputs=*/0);
jit::tracer::recordSourceLocation(node);
jit::tracer::addInputs(node, "self", self_);
jit::tracer::addInputs(node, "memory_format", memory_format);
tracer_state->graph->insertNode(node);
jit::tracer::addOutput(node, self_);
}
Py_INCREF(self);
return self;
}
return THPVariable_Wrap(dispatch_contiguous(self_, memory_format));
END_HANDLE_TH_ERRORS
}
static Tensor dispatch_copy_(Tensor & self, const Tensor & other, bool non_blocking) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.copy_(other, non_blocking);
}
static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"copy_(Tensor other, bool non_blocking=False)",
"copy_(Tensor other, bool async=False)|deprecated"
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
END_HANDLE_TH_ERRORS
}
static double dispatch_to_CDouble(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
if (self.numel() != 1) {
throw ValueError("only one element tensors can be converted to Python scalars");
}
return self.item<double>();
}
static std::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
if (self.numel() != 1) {
throw ValueError("only one element tensors can be converted to Python scalars");
}
return self.item<std::complex<double>>();
}
static int64_t dispatch_to_CLong(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
if (self.numel() != 1) {
throw ValueError("only one element tensors can be converted to Python scalars");
}
return self.item<int64_t>();
}
static bool dispatch_to_Bool(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
if (self.numel() != 1) {
throw ValueError("only one element tensors can be converted to Python scalars");
}
return self.item<bool>();
}
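// The dispatch_to_* helpers above back the Python conversions float(t), int(t),
// bool(t) and t.item(); each requires a single-element tensor. Illustrative
// Python usage (not part of this file):
//   float(torch.tensor([1.5]))  -> 1.5
//   float(torch.ones(2))        -> ValueError: only one element tensors ...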
static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return wrap(dispatch_to_CDouble(self_));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (isFloatingType(self_.scalar_type())) {
// we can't dispatch to item<int64_t> here because we want to avoid ATen overflow checks;
// the Python integral type (long in Python 2) can't overflow.
return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
} else {
return wrap(dispatch_to_CLong(self_));
}
END_HANDLE_TH_ERRORS
}
// This implements Python's __index__, which is similar to __int__ but is called
// when the tensor is used as an index (for example in slicing).
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a Python index", jit::tracer::WARN_PYTHON_DATAFLOW);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
// TODO: change the condition to `self_.dim() != 0` once we expose scalars
// in PyTorch.
if (!isIntegralType(self_.scalar_type()) || self_.numel() != 1) {
throw TypeError("only integer tensors of a single element can be converted to an index");
}
return wrap(dispatch_to_CLong(self_));
END_HANDLE_TH_ERRORS
}
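// Illustrative Python usage (assumption, for orientation only): a single-element
// integral tensor can be used wherever an index is expected, e.g.
//   lst[torch.tensor(2)]   or   x[torch.tensor(0):]
// while floating-point or multi-element tensors hit the TypeError above.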
static Tensor dispatch_invert(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return 1 - self;
}
static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (self_.scalar_type() != at::kByte) {
throw TypeError("~ (operator.invert) is only implemented on byte tensors");
}
return THPVariable_Wrap(dispatch_invert(self_));
END_HANDLE_TH_ERRORS
}
static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy) {
AutoNoGIL no_gil;
// NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of
// aten::to differs for TensorOptions fields that are not present: aten::to inherits any missing
// fields from the self argument, while the tracer assumes they should be populated with the
// default values (e.g. float for the scalar type). By explicitly copying over the tensor options
// here we fully specify all of them and thus record the proper trace.
return self.to(self.options().device(device), non_blocking, copy);
}
static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy) {
AutoNoGIL no_gil;
return self.to(dtype, non_blocking, copy);
}
static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
AutoNoGIL no_gil;
return self.to(device, dtype, non_blocking, copy);
}
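// The three dispatch_to overloads above correspond to the device-only, dtype-only,
// and device+dtype forms of Tensor.to; THPVariable_to below selects among them based
// on which arguments were supplied, and cpu()/cuda() reuse the device-only overload.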
static PyObject * THPVariable_cpu(PyObject* self, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cuda(Device? device=None, bool non_blocking=False)",
"cuda(Device? device=None, bool async=False)|deprecated"
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
TORCH_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
torch::utils::cuda_lazy_init();
return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType) {
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_byte(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Byte);
}
static PyObject * THPVariable_char(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Char);
}
static PyObject * THPVariable_double(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Double);
}
static PyObject * THPVariable_float(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Float);
}
static PyObject * THPVariable_half(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Half);
}
static PyObject * THPVariable_int(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Int);
}
static PyObject * THPVariable_long(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Long);
}
static PyObject * THPVariable_short(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Short);
}
static PyObject * THPVariable_bool(PyObject* self, PyObject* args) {
return THPVariable_to_type(self, ScalarType::Bool);
}
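// byte()/char()/double()/float()/half()/int()/long()/short()/bool() above are thin
// wrappers over THPVariable_to_type, i.e. roughly equivalent to t.to(dtype) with
// non_blocking=false and copy=false.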
static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return THPUtils_packInt64(self_.element_size());
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_numpy(PyObject* self, PyObject* arg)
{
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (self_.requires_grad()) {
throw std::runtime_error(
"Can't call numpy() on Variable that requires grad. "
"Use var.detach().numpy() instead.");
}
return torch::utils::tensor_to_numpy(self_.tensor_data());
END_HANDLE_TH_ERRORS
}
// TODO: move this to ATen. We would need to expose Stream objects in ATen.
static PyObject * THPVariable_record_stream(PyObject* self, PyObject* arg)
{
HANDLE_TH_ERRORS
#ifdef USE_CUDA
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (!THCPStream_Check(arg)) {
return PyErr_Format(PyExc_TypeError, "expected Stream object");
}
void* data = self_.data_ptr();
c10::cuda::CUDACachingAllocator::recordStream(data, at::cuda::CUDAStream::unpack(((THCPStream*)arg)->cdata));
Py_RETURN_NONE;
#else
throw std::runtime_error("PyTorch compiled without CUDA support");
#endif
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"requires_grad_(bool requires_grad=True)",
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto requires_grad = r.toBool(0);
// Should we also throw when requires_grad is true? The property setter
// (`var.requires_grad = True`) throws for non-leaf Variables, but it is nice to
// let this method be a no-op in that case.
if (!self_.is_leaf() && !requires_grad) {
throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
}
if (requires_grad && !self_.is_floating_point()) {
throw std::runtime_error("only Tensors of floating point dtype can require gradients");
}
self_.set_requires_grad(requires_grad);
return THPVariable_Wrap(self_);
END_HANDLE_TH_ERRORS
}
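// Illustrative Python behavior of requires_grad_ as implemented above:
//   leaf floating-point tensor:  t.requires_grad_()       -> enables grad
//   non-leaf tensor:             t.requires_grad_(False)  -> RuntimeError
//   integer tensor:              t.requires_grad_(True)   -> RuntimeError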
inline bool dispatch_is_contiguous(Tensor & self, MemoryFormat memory_format) {
return self.is_contiguous(memory_format);
}
static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"is_contiguous(*, MemoryFormat memory_format=contiguous_format)",
});
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
auto memory_format = r.toMemoryFormat(0);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_is_contiguous(self, memory_format));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (self_.is_floating_point()) {
return wrap(dispatch_to_CDouble(self_));
} else if (self_.is_complex()) {
return wrap(dispatch_to_CComplexDouble(self_));
} else if (self_.scalar_type() == ScalarType::Bool) {
return wrap(dispatch_to_Bool(self_));
} else {
return wrap(dispatch_to_CLong(self_));
}
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
Variable other = r.tensor(0);
if (self_.requires_grad() || other.requires_grad()) {
throw std::runtime_error(
"Can't call map_() on Variable that requires grad. Use "
"var.detach().map_() instead.");
}
return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
Variable x = r.tensor(0);
Variable y = r.tensor(1);
if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
throw std::runtime_error(
"Can't call map2_() on Variable that requires grad. Use "
"var.detach().map2_() instead.");
}
return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_empty(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::new_empty(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_full(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::new_full(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_ones(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::new_ones(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::new_tensor(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_zeros(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
return THPVariable_Wrap(torch::utils::new_zeros(self_.dispatch_type(), self_.scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return createPyObject(self_.storage());
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage_type(PyObject* self, PyObject* arg)
{
HANDLE_TH_ERRORS
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
auto storage = THPObjectPtr(createPyObject(self_.storage()));
auto storage_type = (PyObject*)Py_TYPE(storage);
Py_INCREF(storage_type);
return storage_type;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
auto parsed = parse_to_conversion(args, kwargs, /*allow_copy*/ true);
auto& device = std::get<0>(parsed);
auto& scalarType = std::get<1>(parsed);
auto non_blocking = std::get<2>(parsed);
auto copy = std::get<3>(parsed);
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
if (device && device->is_cuda()) {
torch::utils::cuda_lazy_init();
}
if (!device && !scalarType && !copy) {
Py_INCREF(self);
return self;
} else if (!device) {
return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy));
} else if (!scalarType) {
return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy));
} else {
return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
HANDLE_TH_ERRORS
jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
auto self_ = reinterpret_cast<THPVariable*>(self)->cdata;
return torch::utils::tensor_to_list(self_);
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"type(PyObject* dtype=None, bool non_blocking=False)",
"type(PyObject* dtype=None, bool async=False)|deprecated"
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.isNone(0)) {
return THPUtils_packString(torch::utils::type_to_string(self_.dispatch_type(), self_.scalar_type()));
}
auto obj = r.pyobject(0);
std::string type_name;
bool is_dtype = false;
if (PyType_Check(obj)) {
if (obj == THPVariableClass) {
type_name = "torch.Tensor";
} else {
type_name = ((PyTypeObject*)obj)->tp_name;
}
} else if (THPUtils_checkString(obj)) {
type_name = THPUtils_unpackString(obj);
} else if (THPDtype_Check(obj)) {
is_dtype = true;
} else {
throw TypeError("dtype must be a type, str, or dtype object");
}
ScalarType scalar_type;
Device device = self_.device();
if (is_dtype) {
scalar_type = r.scalartype(0);
} else {
Type* type;
std::tie(type, scalar_type) = torch::utils::type_from_string(type_name);
auto device_type = backendToDeviceType(type->backend());
if (device_type != device.type()) {
device = at::Device(device_type);
}
}
if (device.is_cuda()) {
torch::utils::cuda_lazy_init();
}
return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false));
END_HANDLE_TH_ERRORS
}
// generated methods start here
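// Each generated method below follows the same template: parse the Python arguments
// against one or more declared signatures (entries tagged |deprecated keep old calling
// conventions working), forward to the matching dispatch_<op> helper declared in
// python_variable_methods_dispatch.h (included above), and wrap the result.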
static PyObject * THPVariable___and__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__and__(Tensor other)",
"__and__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___and__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___and__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___iand__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__iand__(Tensor other)",
"__iand__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___iand__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___iand__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___ilshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__ilshift__(Tensor other)",
"__ilshift__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___ilshift__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___ilshift__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___ior__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__ior__(Tensor other)",
"__ior__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___ior__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___ior__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___irshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__irshift__(Tensor other)",
"__irshift__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___irshift__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___irshift__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___ixor__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__ixor__(Tensor other)",
"__ixor__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___ixor__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___ixor__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___lshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__lshift__(Tensor other)",
"__lshift__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___lshift__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___lshift__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___or__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__or__(Tensor other)",
"__or__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___or__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___or__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___rshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__rshift__(Tensor other)",
"__rshift__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___rshift__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___rshift__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___xor__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__xor__(Tensor other)",
"__xor__(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___xor__(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch___xor__(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__coalesced_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_coalesced_(bool coalesced)",
}, /*traceable=*/false);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__coalesced_(self, r.toBool(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__dimI(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch__dimI(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__dimV(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch__dimV(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__indices(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch__indices(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__nnz(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch__nnz(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__values(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch__values(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_abs(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_abs(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_abs_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_abs_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_acos(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_acos(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_acos_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_acos_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"add(Scalar alpha, Tensor other)|deprecated",
"add(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_add(self, r.scalar(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch_add(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_add_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"add_(Scalar alpha, Tensor other)|deprecated",
"add_(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_add_(self, r.scalar(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch_add_(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addbmm(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
"addbmm(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
"addbmm(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addbmm(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addbmm(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addbmm(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addbmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addbmm_(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
"addbmm_(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
"addbmm_(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addbmm_(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addbmm_(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addbmm_(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addcdiv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcdiv(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
"addcdiv(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addcdiv(self, r.scalar(0), r.tensor(1), r.tensor(2)));
} else if (r.idx == 1) {
return wrap(dispatch_addcdiv(self, r.tensor(0), r.tensor(1), r.scalar(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addcdiv_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcdiv_(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
"addcdiv_(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addcdiv_(self, r.scalar(0), r.tensor(1), r.tensor(2)));
} else if (r.idx == 1) {
return wrap(dispatch_addcdiv_(self, r.tensor(0), r.tensor(1), r.scalar(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addcmul(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcmul(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
"addcmul(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addcmul(self, r.scalar(0), r.tensor(1), r.tensor(2)));
} else if (r.idx == 1) {
return wrap(dispatch_addcmul(self, r.tensor(0), r.tensor(1), r.scalar(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addcmul_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcmul_(Scalar value, Tensor tensor1, Tensor tensor2)|deprecated",
"addcmul_(Tensor tensor1, Tensor tensor2, *, Scalar value=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addcmul_(self, r.scalar(0), r.tensor(1), r.tensor(2)));
} else if (r.idx == 1) {
return wrap(dispatch_addcmul_(self, r.tensor(0), r.tensor(1), r.scalar(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmm(Scalar beta, Scalar alpha, Tensor mat1, Tensor mat2)|deprecated",
"addmm(Scalar beta, Tensor mat1, Tensor mat2)|deprecated",
"addmm(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addmm(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addmm(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addmm(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmm_(Scalar beta, Scalar alpha, Tensor mat1, Tensor mat2)|deprecated",
"addmm_(Scalar beta, Tensor mat1, Tensor mat2)|deprecated",
"addmm_(Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addmm_(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addmm_(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addmm_(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addmv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmv(Scalar beta, Scalar alpha, Tensor mat, Tensor vec)|deprecated",
"addmv(Scalar beta, Tensor mat, Tensor vec)|deprecated",
"addmv(Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addmv(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addmv(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addmv(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addmv_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmv_(Scalar beta, Scalar alpha, Tensor mat, Tensor vec)|deprecated",
"addmv_(Scalar beta, Tensor mat, Tensor vec)|deprecated",
"addmv_(Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addmv_(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addmv_(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addmv_(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addr(Scalar beta, Scalar alpha, Tensor vec1, Tensor vec2)|deprecated",
"addr(Scalar beta, Tensor vec1, Tensor vec2)|deprecated",
"addr(Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addr(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addr(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addr(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addr_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addr_(Scalar beta, Scalar alpha, Tensor vec1, Tensor vec2)|deprecated",
"addr_(Scalar beta, Tensor vec1, Tensor vec2)|deprecated",
"addr_(Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addr_(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_addr_(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_addr_(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_all(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"all()",
"all(int64_t dim, bool keepdim=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_all(self));
} else if (r.idx == 1) {
return wrap(dispatch_all(self, r.toInt64(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_allclose(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"allclose(Tensor other, double rtol=1e-05, double atol=1e-08, bool equal_nan=False)",
}, /*traceable=*/false);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_allclose(self, r.tensor(0), r.toDouble(1), r.toDouble(2), r.toBool(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_any(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"any()",
"any(int64_t dim, bool keepdim=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_any(self));
} else if (r.idx == 1) {
return wrap(dispatch_any(self, r.toInt64(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_argmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argmax(int64_t? dim=c10::nullopt, bool keepdim=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argmax(self, r.toInt64Optional(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_argmin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argmin(int64_t? dim=c10::nullopt, bool keepdim=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argmin(self, r.toInt64Optional(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_argsort(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argsort(int64_t dim=-1, bool descending=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argsort(self, r.toInt64(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_as_strided(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"as_strided(IntArrayRef size, IntArrayRef stride, int64_t? storage_offset=c10::nullopt)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_as_strided(self, r.intlist(0), r.intlist(1), r.toInt64Optional(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_as_strided_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"as_strided_(IntArrayRef size, IntArrayRef stride, int64_t? storage_offset=c10::nullopt)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_as_strided_(self, r.intlist(0), r.intlist(1), r.toInt64Optional(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_asin(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_asin(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_asin_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_asin_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_atan(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"atan2(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_atan2(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan2_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"atan2_(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_atan2_(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_atan_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_baddbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"baddbmm(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
"baddbmm(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
"baddbmm(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_baddbmm(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_baddbmm(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_baddbmm(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_baddbmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"baddbmm_(Scalar beta, Scalar alpha, Tensor batch1, Tensor batch2)|deprecated",
"baddbmm_(Scalar beta, Tensor batch1, Tensor batch2)|deprecated",
"baddbmm_(Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_baddbmm_(r.scalar(0), self, r.scalar(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 1) {
return wrap(dispatch_baddbmm_(r.scalar(0), self, r.tensor(1), r.tensor(2)));
} else if (r.idx == 2) {
return wrap(dispatch_baddbmm_(self, r.tensor(0), r.tensor(1), r.scalar(2), r.scalar(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bernoulli(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bernoulli(*, Generator generator=None)",
"bernoulli(double p, *, Generator generator=None)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_bernoulli(self, r.generator(0)));
} else if (r.idx == 1) {
return wrap(dispatch_bernoulli(self, r.toDouble(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bernoulli_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bernoulli_(Tensor p, *, Generator generator=None)",
"bernoulli_(double p=0.5, *, Generator generator=None)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_bernoulli_(self, r.tensor(0), r.generator(1)));
} else if (r.idx == 1) {
return wrap(dispatch_bernoulli_(self, r.toDouble(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bincount(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bincount(Tensor? weights=None, int64_t minlength=0)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_bincount(self, r.tensor(0), r.toInt64(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bmm(Tensor mat2)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_bmm(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cauchy_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cauchy_(double median=0, double sigma=1, *, Generator generator=None)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cauchy_(self, r.toDouble(0), r.toDouble(1), r.generator(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ceil(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_ceil(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ceil_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_ceil_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cholesky(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cholesky(bool upper=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cholesky(self, r.toBool(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cholesky_inverse(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cholesky_inverse(bool upper=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cholesky_inverse(self, r.toBool(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cholesky_solve(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cholesky_solve(Tensor input2, bool upper=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cholesky_solve(self, r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_chunk(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"chunk(int64_t chunks, int64_t dim=0)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_chunk(self, r.toInt64(0), r.toInt64(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp(Scalar? min=c10::nullopt, Scalar? max=c10::nullopt)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp(self, r.scalarOptional(0), r.scalarOptional(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp_(Scalar? min=c10::nullopt, Scalar? max=c10::nullopt)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp_(self, r.scalarOptional(0), r.scalarOptional(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp_max(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp_max(Scalar max)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp_max(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp_max_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp_max_(Scalar max)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp_max_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp_min(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp_min(Scalar min)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp_min(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clamp_min_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"clamp_min_(Scalar min)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_clamp_min_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_clone(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_clone(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_coalesce(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_coalesce(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cos(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_cos(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cos_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_cos_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cosh(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_cosh(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cosh_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_cosh_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cross(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cross(Tensor other, int64_t? dim=c10::nullopt)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cross(self, r.tensor(0), r.toInt64Optional(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cumprod(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cumprod(int64_t dim)",
"cumprod(int64_t dim, *, ScalarType dtype)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cumprod(self, r.toInt64(0)));
} else if (r.idx == 1) {
return wrap(dispatch_cumprod(self, r.toInt64(0), r.scalartype(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_cumsum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"cumsum(int64_t dim)",
"cumsum(int64_t dim, *, ScalarType dtype)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_cumsum(self, r.toInt64(0)));
} else if (r.idx == 1) {
return wrap(dispatch_cumsum(self, r.toInt64(0), r.scalartype(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_data_ptr(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dense_dim(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_dense_dim(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dequantize(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_dequantize(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_det(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_det(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_detach(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_detach(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_detach_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_detach_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_diag(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"diag(int64_t diagonal=0)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_diag(self, r.toInt64(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_diag_embed(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_diag_embed(self, r.toInt64(0), r.toInt64(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_diagflat(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"diagflat(int64_t offset=0)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_diagflat(self, r.toInt64(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_diagonal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_diagonal(self, r.toInt64(0), r.toInt64(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_digamma(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_digamma(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_digamma_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_digamma_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dist(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"dist(Tensor other, Scalar p=2)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_dist(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"div(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_div(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_div_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"div_(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_div_(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dot(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"dot(Tensor tensor)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_dot(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_eig(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"eig(bool eigenvectors=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
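  // eig() returns a namedtuple-like result: the PyStructSequence type
  // ("torch.return_types.eig" with fields eigenvalues/eigenvectors) is built
  // lazily on first use. The plain `namedtuple_type_initialized0` flag needs
  // no extra locking here because this code runs with the GIL held.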
static PyStructSequence_Field fields0[] = {
{"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr}
};
static PyStructSequence_Desc desc0 = {
"torch.return_types.eig", nullptr,
fields0, 2
};
static PyTypeObject type0;
static bool namedtuple_type_initialized0 = false;
if (!namedtuple_type_initialized0) {
PyStructSequence_InitType(&type0, &desc0);
type0.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
namedtuple_type_initialized0 = true;
}
if (r.idx == 0) {
return wrap(&type0, dispatch_eig(self, r.toBool(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
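// Comparison and arithmetic methods expose two overloads, one taking a
// Tensor and one taking a Scalar; r.idx selects the branch, so e.g.
// `x.eq(other_tensor)` takes idx == 0 while `x.eq(3)` takes idx == 1.
// The in-place variants (eq_, ge_, gt_, ...) mirror this exactly but
// dispatch to the underscore-suffixed in-place ops.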
static PyObject * THPVariable_eq(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"eq(Tensor other)",
"eq(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_eq(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_eq(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_eq_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"eq_(Tensor other)",
"eq_(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_eq_(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_eq_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_equal(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"equal(Tensor other)",
}, /*traceable=*/false);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_equal(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erf(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erf(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erf_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erf_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erfc(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erfc(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erfc_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erfc_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erfinv(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erfinv(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_erfinv_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_erfinv_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_exp(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_exp(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_exp_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_exp_(self));
END_HANDLE_TH_ERRORS
}
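// Arguments listed after `*` in a signature string are keyword-only:
// `implicit` below can only be supplied by keyword from Python.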
static PyObject * THPVariable_expand(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"expand(IntArrayRef size, *, bool implicit=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_expand(self, r.intlist(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_expand_as(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"expand_as(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_expand_as(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_expm1(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_expm1(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_expm1_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_expm1_(self));
END_HANDLE_TH_ERRORS
}
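// In-place random sampling methods (exponential_, geometric_, ...) accept an
// optional keyword-only Generator; when it is left as None, r.generator(1)
// yields a null pointer and ATen falls back to the default generator.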
static PyObject * THPVariable_exponential_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"exponential_(double lambd=1, *, Generator generator=None)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_exponential_(self, r.toDouble(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_fft(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"fft(int64_t signal_ndim, bool normalized=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_fft(self, r.toInt64(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_fill_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"fill_(Tensor value)",
"fill_(Scalar value)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_fill_(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_fill_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_flatten(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"flatten(int64_t start_dim=0, int64_t end_dim=-1)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_flatten(self, r.toInt64(0), r.toInt64(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_flip(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"flip(IntArrayRef dims)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_flip(self, r.intlist(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_floor(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_floor(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_floor_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_floor_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_fmod(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"fmod(Tensor other)",
"fmod(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_fmod(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_fmod(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_fmod_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"fmod_(Tensor other)",
"fmod_(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_fmod_(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_fmod_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_frac(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_frac(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_frac_(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(dispatch_frac_(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_gather(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"gather(int64_t dim, Tensor index, *, bool sparse_grad=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_gather(self, r.toInt64(0), r.tensor(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ge(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"ge(Tensor other)",
"ge(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_ge(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_ge(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ge_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"ge_(Tensor other)",
"ge_(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_ge_(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_ge_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_gels(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"gels(Tensor A)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
static PyStructSequence_Field fields0[] = {
{"solution", ""}, {"QR", ""}, {nullptr}
};
static PyStructSequence_Desc desc0 = {
"torch.return_types.gels", nullptr,
fields0, 2
};
static PyTypeObject type0;
static bool namedtuple_type_initialized0 = false;
if (!namedtuple_type_initialized0) {
PyStructSequence_InitType(&type0, &desc0);
type0.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
namedtuple_type_initialized0 = true;
}
if (r.idx == 0) {
return wrap(&type0, dispatch_gels(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_geometric_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"geometric_(double p, *, Generator generator=None)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_geometric_(self, r.toDouble(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
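// geqrf() combines the two patterns above: it takes no arguments, so there is
// no parser, but it still returns a lazily-initialized
// torch.return_types.geqrf struct sequence (fields `a` and `tau`).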
static PyObject * THPVariable_geqrf(PyObject* self_, PyObject* args)
{
HANDLE_TH_ERRORS
static PyStructSequence_Field fields0[] = {
{"a", ""}, {"tau", ""}, {nullptr}
};
static PyStructSequence_Desc desc0 = {
"torch.return_types.geqrf", nullptr,
fields0, 2
};
static PyTypeObject type0;
static bool namedtuple_type_initialized0 = false;
if (!namedtuple_type_initialized0) {
PyStructSequence_InitType(&type0, &desc0);
type0.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
namedtuple_type_initialized0 = true;
}
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
return wrap(&type0, dispatch_geqrf(self));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ger(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"ger(Tensor vec2)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_ger(self, r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_gt(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"gt(Tensor other)",
"gt(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_gt(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_gt(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_gt_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"gt_(Tensor other)",
"gt_(Scalar other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_gt_(self, r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch_gt_(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_hardshrink(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"hardshrink(Scalar lambd=0.5)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_hardshrink(self, r.scalar(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_histc(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"histc(int64_t bins=100, Scalar min=0, Scalar max=0)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_histc(self, r.toInt64(0), r.scalar(1), r.scalar(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_ifft(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"ifft(int64_t signal_ndim, bool normalized=False)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_ifft(self, r.toInt64(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_index_add(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"index_add(int64_t dim, Tensor index, Tensor source)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_index_add(self, r.toInt64(0), r.tensor(1), r.tensor(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_index_add_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"index_add_(int64_t dim, Tensor index, Tensor source)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_index_add_(self, r.toInt64(0), r.tensor(1), r.tensor(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_index_copy(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"index_copy(int64_t dim, Tensor index, Tensor source)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);