// @generated from tools/autograd/templates/python_torch_functions.cpp
// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on the class
// torch._C._VariableFunctions, which is also aliased as Variable._torch
// and copied into the 'torch' module.
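//
// Reader's note (illustrative, not part of the generated output): each
// THPVariable_* entry point below parses its Python arguments with a static
// PythonArgParser, selects an overload via r.idx, forwards to an inline
// dispatch_* helper that releases the GIL, and wraps the resulting
// at::Tensor back into a PyObject*. A Python call such as torch.arange(5)
// therefore lands in THPVariable_arange and reaches at::arange underneath.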
#include <Python.h>
#include "python_torch_functions_dispatch.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"
#include <ATen/ATen.h>
#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>
using at::Tensor;
using at::Device;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using namespace torch::autograd::utils;
namespace torch { namespace autograd {
static void check_out_type_matches(Tensor result,
ScalarType scalarType, bool scalarType_is_none,
const THPLayout& layout, bool layout_is_none,
const Device& device, bool device_is_none) {
if (scalarType_is_none && layout_is_none && device_is_none) { // common case
return;
}
if (!scalarType_is_none && result.scalar_type() != scalarType) {
AT_ERROR(
"dtype ", scalarType,
" does not match dtype of out parameter (", result.scalar_type(), ")");
}
auto scalarType_arg = scalarType_is_none ? result.scalar_type() : scalarType;
auto layout_arg = layout_is_none ? *torch::getLayout(result.type().backend()) : layout;
auto device_type_arg = device_is_none ? result.device().type() : device.type();
const auto& type = torch::getVariableType(scalarType_arg, layout_arg, device_type_arg);
if (result.dispatch_type() != type) {
AT_ERROR(
"type corresponding to ", type.toString(),
" does not match type of out parameter (", result.type().toString(), ")");
}
}
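// Illustrative failure mode of the check above: with a LongTensor t,
// torch.arange(5, out=t, dtype=torch.float64) raises
// "dtype ... does not match dtype of out parameter (...)", since the
// explicit dtype disagrees with the dtype of the out tensor.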
inline Tensor dispatch_arange(Scalar end, Tensor result) {
AutoNoGIL no_gil;
return at::arange_out(result, end);
}
inline Tensor dispatch_arange(Scalar end, const TensorOptions& options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::arange(end, options);
}
inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, Tensor result) {
AutoNoGIL no_gil;
return at::arange_out(result, start, end, step);
}
inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::arange(start, end, step, options);
}
static inline bool allIntegral(std::initializer_list<std::reference_wrapper<Scalar>> l) {
for (Scalar& s : l) {
if (!s.isIntegral()) {
return false;
}
}
return true;
}
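// allIntegral drives arange's dtype inference below: if no dtype is given
// and start/end/step are all integral, the result defaults to int64 (Long);
// otherwise the default dtype is used. E.g. torch.arange(5) produces an
// int64 tensor, while torch.arange(5.0) produces one of the default float
// dtype.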
static PyObject * THPVariable_arange(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"arange(Scalar end, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
"arange(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
});
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
auto end = r.scalar(0);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto scalarType = r.isNone(2) && allIntegral({end}) ? at::ScalarType::Long : r.scalartype(2);
const auto options = TensorOptions()
.dtype(scalarType)
.device(r.device(4))
.layout(r.layout(3).layout)
.requires_grad(r.toBool(6))
.pinned_memory(r.toBool(5));
return wrap(dispatch_arange(end, options));
} else {
TORCH_CHECK(!r.toBool(5), " `pin_memory` and `out` parameters are incompatible");
check_out_type_matches(r.tensor(1), r.scalartype(2), r.isNone(2), r.layout(3), r.isNone(3),
r.device(4), r.isNone(4));
return wrap(dispatch_arange(r.scalar(0), r.tensor(1)).set_requires_grad(r.toBool(6)));
}
} else if (r.idx == 1) {
if (r.isNone(3)) {
auto start = r.scalar(0);
auto end = r.scalar(1);
auto step = r.scalar(2);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto scalarType = r.isNone(4) && allIntegral({start, end, step}) ? at::ScalarType::Long : r.scalartype(4);
const auto options = TensorOptions()
.dtype(scalarType)
.device(r.device(6))
.layout(r.layout(5).layout)
.requires_grad(r.toBool(8))
.pinned_memory(r.toBool(7));
return wrap(dispatch_arange(start, end, step, options));
} else {
TORCH_CHECK(!r.toBool(7), " `pin_memory` and `out` parameters are incompatible");
check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4), r.layout(5), r.isNone(5),
r.device(6), r.isNone(6));
return wrap(dispatch_arange(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(8)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
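// Note on the out= paths above: dtype/layout/device are validated against
// the out tensor via check_out_type_matches, pin_memory is rejected as
// incompatible with out, and requires_grad is applied to the result only
// after the kernel has run.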
inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, Tensor result) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(result));
return at::range_out(result, start, end, step);
}
inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
DeviceGuard device_guard(options.device());
return torch::range(start, end, step, options);
}
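// Unlike the arange helpers, the range helpers pin the current device: the
// out= variant guards on the result tensor's device and the factory variant
// on options.device(), in both cases with the GIL released.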
static PyObject * THPVariable_range(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"range(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
});
ParsedArgs<8> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
PyErr_WarnEx(PyExc_UserWarning, "torch.range is deprecated in favor of torch.arange "
"and will be removed in 0.5. Note that arange generates values in [start; end), "
"not [start; end].", 1);
if (r.isNone(3)) {
const auto options = TensorOptions()
.dtype(r.scalartype(4))
.device(r.device(6))
.layout(r.layout(5).layout)
.requires_grad(r.toBool(7));
return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), options));
} else {
check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
r.layout(5), r.isNone(5),
r.device(6), r.isNone(6));
return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
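// Endpoint semantics behind the deprecation warning above:
//   torch.range(0, 5)  -> [0, 1, 2, 3, 4, 5]  (6 elements, end inclusive)
//   torch.arange(0, 5) -> [0, 1, 2, 3, 4]     (5 elements, end exclusive)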
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
AutoNoGIL no_gil;
return at::randint_out(result, high, size, generator);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::randint(high, size, generator, options);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Tensor result) {
AutoNoGIL no_gil;
return at::randint_out(result, high, size);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, const TensorOptions & options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::randint(high, size, options);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
AutoNoGIL no_gil;
return at::randint_out(result, low, high, size, generator);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::randint(low, high, size, generator, options);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Tensor result) {
AutoNoGIL no_gil;
return at::randint_out(result, low, high, size);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options) {
maybe_initialize_cuda(options);
AutoNoGIL no_gil;
return torch::randint(low, high, size, options);
}
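// The eight dispatch_randint overloads above cover the cross product of
// {low given, low omitted} x {generator given, generator omitted} x
// {out tensor, TensorOptions}; THPVariable_randint below selects among them
// from the matched overload string (r.idx) and whether out is None.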
static PyObject * THPVariable_randint(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"randint(int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
"randint(int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
"randint(int64_t low, int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
"randint(int64_t low, int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
}, /*traceable=*/false);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
auto high = r.toInt64(0);
auto size = r.intlist(1);
auto generator = r.generator(2);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
auto device = r.device(6);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(5).layout)
.requires_grad(r.toBool(7));
return wrap(dispatch_randint(high, size, generator, options));
} else {
check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
r.layout(5), r.isNone(5),
r.device(6), r.isNone(6));
return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.generator(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
}
} else if (r.idx == 1) {
if (r.isNone(2)) {
auto high = r.toInt64(0);
auto size = r.intlist(1);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto dtype = r.scalartypeWithDefault(3, at::ScalarType::Long);
auto device = r.device(5);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(4).layout)
.requires_grad(r.toBool(6));
return wrap(dispatch_randint(high, size, options));
} else {
check_out_type_matches(r.tensor(2), r.scalartype(3), r.isNone(3),
r.layout(4), r.isNone(4),
r.device(5), r.isNone(5));
return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.tensor(2)).set_requires_grad(r.toBool(6)));
}
} else if (r.idx == 2) {
if (r.isNone(4)) {
auto low = r.toInt64(0);
auto high = r.toInt64(1);
auto size = r.intlist(2);
auto generator = r.generator(3);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto dtype = r.scalartypeWithDefault(5, at::ScalarType::Long);
auto device = r.device(7);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(6).layout)
.requires_grad(r.toBool(8));
return wrap(dispatch_randint(low, high, size, generator, options));
} else {
check_out_type_matches(r.tensor(4), r.scalartype(5), r.isNone(5),
r.layout(6), r.isNone(6),
r.device(7), r.isNone(7));
return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.generator(3), r.tensor(4)).set_requires_grad(r.toBool(8)));
}
} else if (r.idx == 3) {
if (r.isNone(3)) {
auto low = r.toInt64(0);
auto high = r.toInt64(1);
auto size = r.intlist(2);
// NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
auto device = r.device(6);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(5).layout)
.requires_grad(r.toBool(7));
return wrap(dispatch_randint(low, high, size, options));
} else {
check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
r.layout(5), r.isNone(5),
r.device(6), r.isNone(6));
return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_as_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
jit::tracer::warn("torch.as_tensor", jit::tracer::WARN_CONSTRUCTOR);
return THPVariable_Wrap(torch::utils::as_tensor(default_type(), default_scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_from_numpy(PyObject* module, PyObject* arg)
{
HANDLE_TH_ERRORS
jit::tracer::warn("torch.from_numpy", jit::tracer::WARN_CONSTRUCTOR);
auto data = torch::utils::tensor_from_numpy(arg);
return THPVariable_Wrap(make_variable(std::move(data), /*requires_grad=*/false));
END_HANDLE_TH_ERRORS
}
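// Semantics note (documented torch.from_numpy behavior, not visible in this
// file): the returned Variable shares memory with the source ndarray, so
// in-place modifications on either side are visible to both.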
static PyObject * THPVariable__promote_types(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_promote_types(ScalarType type1, ScalarType type2)",
});
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
ScalarType promoted = at::promoteTypes(r.scalartype(0), r.scalartype(1));
return torch::autograd::utils::wrap(torch::getDtype(promoted));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
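// Example under at::promoteTypes' standard rules:
//   torch._promote_types(torch.int64, torch.float32) -> torch.float32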
static PyObject * THPVariable_sparse_coo_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
jit::tracer::warn("torch.sparse_coo_tensor", jit::tracer::WARN_CONSTRUCTOR);
return THPVariable_Wrap(torch::utils::sparse_coo_tensor_ctor(default_type(), default_scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
jit::tracer::warn("torch.tensor", jit::tracer::WARN_CONSTRUCTOR);
return THPVariable_Wrap(torch::utils::tensor_ctor(default_type(), default_scalar_type(), args, kwargs));
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"get_device(Tensor input)",
}, /*traceable=*/false);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(r.tensor(0).get_device());
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
// generated methods start here
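// Every generated stub below follows the same shape; schematically:
//
//   static PyObject * THPVariable_<op>(PyObject* self_, PyObject* args, PyObject* kwargs)
//   {
//     HANDLE_TH_ERRORS                         // convert C++ exceptions to Python errors
//     static PythonArgParser parser({ ... });  // one signature string per overload
//     ParsedArgs<N> parsed_args;
//     auto r = parser.parse(args, kwargs, parsed_args);
//     if (r.idx == k) { return wrap(dispatch_<op>(...)); }  // k-th overload matched
//     Py_RETURN_NONE;                          // only reached if no overload matched
//     END_HANDLE_TH_ERRORS
//   }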
static PyObject * THPVariable___and__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__and__(Tensor input, Tensor other)",
"__and__(Tensor input, Scalar other)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___and__(r.tensor(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch___and__(r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___lshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__lshift__(Tensor input, Tensor other)",
"__lshift__(Tensor input, Scalar other)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___lshift__(r.tensor(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch___lshift__(r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___or__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__or__(Tensor input, Tensor other)",
"__or__(Tensor input, Scalar other)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___or__(r.tensor(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch___or__(r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___rshift__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__rshift__(Tensor input, Tensor other)",
"__rshift__(Tensor input, Scalar other)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___rshift__(r.tensor(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch___rshift__(r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable___xor__(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"__xor__(Tensor input, Tensor other)",
"__xor__(Tensor input, Scalar other)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch___xor__(r.tensor(0), r.tensor(1)));
} else if (r.idx == 1) {
return wrap(dispatch___xor__(r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
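// The __and__/__lshift__/__or__/__rshift__/__xor__ bindings above are
// identical apart from the dispatched op: overload 0 takes a Tensor rhs,
// overload 1 a Scalar rhs.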
static PyObject * THPVariable__adaptive_avg_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_adaptive_avg_pool2d(Tensor input, IntArrayRef[2] output_size)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__adaptive_avg_pool2d(r.tensor(0), r.intlist(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__addmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_addmm(Tensor input, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch__addmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch__addmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__addmm_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_addmm_(Tensor input, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__addmm_(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__addr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_addr(Tensor input, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch__addr(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch__addr(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__addr_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_addr_(Tensor input, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__addr_(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__baddbmm_mkl_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_baddbmm_mkl_(Tensor input, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__baddbmm_mkl_(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__batch_norm_impl_index(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, double momentum, double eps, bool cudnn_enabled)",
}, /*traceable=*/true);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__batch_norm_impl_index(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4), r.toBool(5), r.toDouble(6), r.toDouble(7), r.toBool(8)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Byte(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Byte(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Byte(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Char(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Char(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Char(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Double(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Double(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Double(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Float(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Float(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Float(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Half(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Half(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Half(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Int(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Int(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Int(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Long(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Long(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Long(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cast_Short(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cast_Short(Tensor input, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cast_Short(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cat(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cat(TensorList tensors, int64_t dim=0, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(2)) {
return wrap(dispatch__cat(r.tensorlist(0), r.toInt64(1)));
} else {
return wrap(dispatch__cat(r.tensorlist(0), r.toInt64(1), r.tensor(2)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__convolution(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_convolution(Tensor input, Tensor weight, Tensor? bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled)",
}, /*traceable=*/true);
ParsedArgs<12> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__convolution(r.tensor(0), r.tensor(1), r.tensor(2), r.intlist(3), r.intlist(4), r.intlist(5), r.toBool(6), r.intlist(7), r.toInt64(8), r.toBool(9), r.toBool(10), r.toBool(11)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__convolution_nogroup(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding)",
}, /*traceable=*/true);
ParsedArgs<8> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__convolution_nogroup(r.tensor(0), r.tensor(1), r.tensor(2), r.intlist(3), r.intlist(4), r.intlist(5), r.toBool(6), r.intlist(7)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__copy_from(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_copy_from(Tensor input, Tensor dst, bool non_blocking=False)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__copy_from(r.tensor(0), r.tensor(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__ctc_loss(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_ctc_loss(Tensor log_probs, Tensor targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=False)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__ctc_loss(r.tensor(0), r.tensor(1), r.intlist(2), r.intlist(3), r.toInt64(4), r.toBool(5)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cudnn_ctc_loss(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cudnn_ctc_loss(Tensor log_probs, Tensor targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity)",
}, /*traceable=*/true);
ParsedArgs<7> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cudnn_ctc_loss(r.tensor(0), r.tensor(1), r.intlist(2), r.intlist(3), r.toInt64(4), r.toBool(5), r.toBool(6)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cudnn_init_dropout_state(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, *, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
}, /*traceable=*/true);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
auto dropout = r.toDouble(0);
auto train = r.toBool(1);
auto dropout_seed = r.toInt64(2);
auto dtype = r.scalartype(3);
auto device = r.device(5);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(4).layout)
.requires_grad(r.toBool(7))
.pinned_memory(r.toBool(6));
return wrap(dispatch__cudnn_init_dropout_state(dropout, train, dropout_seed, options));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cudnn_rnn(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cudnn_rnn(Tensor input, TensorList weight, int64_t weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, Tensor? dropout_state)",
}, /*traceable=*/true);
ParsedArgs<15> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cudnn_rnn(r.tensor(0), r.tensorlist(1), r.toInt64(2), r.tensor(3), r.tensor(4), r.tensor(5), r.toInt64(6), r.toInt64(7), r.toInt64(8), r.toBool(9), r.toDouble(10), r.toBool(11), r.toBool(12), r.intlist(13), r.tensor(14)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cudnn_rnn_flatten_weight(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional)",
}, /*traceable=*/true);
ParsedArgs<8> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cudnn_rnn_flatten_weight(r.tensorlist(0), r.toInt64(1), r.toInt64(2), r.toInt64(3), r.toInt64(4), r.toInt64(5), r.toBool(6), r.toBool(7)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cufft_clear_plan_cache(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cufft_clear_plan_cache(int64_t device_index)",
}, /*traceable=*/false);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
dispatch__cufft_clear_plan_cache(r.toInt64(0));
Py_RETURN_NONE;
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
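// Void-returning bindings such as _cufft_clear_plan_cache return None
// explicitly after dispatching; the trailing Py_RETURN_NONE only covers the
// case where no overload matched.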
static PyObject * THPVariable__cufft_get_plan_cache_max_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cufft_get_plan_cache_max_size(int64_t device_index)",
}, /*traceable=*/false);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cufft_get_plan_cache_max_size(r.toInt64(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cufft_get_plan_cache_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cufft_get_plan_cache_size(int64_t device_index)",
}, /*traceable=*/false);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__cufft_get_plan_cache_size(r.toInt64(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__cufft_set_plan_cache_max_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size)",
}, /*traceable=*/false);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
dispatch__cufft_set_plan_cache_max_size(r.toInt64(0), r.toInt64(1));
Py_RETURN_NONE;
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__debug_has_internal_overlap(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_debug_has_internal_overlap(Tensor input)",
}, /*traceable=*/false);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__debug_has_internal_overlap(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__dequantize_linear(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_dequantize_linear(Tensor input, double scale, int64_t zero_point, ScalarType dtype)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__dequantize_linear(r.tensor(0), r.toDouble(1), r.toInt64(2), r.scalartype(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__dim_arange(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_dim_arange(Tensor like, int64_t dim)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__dim_arange(r.tensor(0), r.toInt64(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__dirichlet_grad(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_dirichlet_grad(Tensor x, Tensor alpha, Tensor total, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
return wrap(dispatch__dirichlet_grad(r.tensor(0), r.tensor(1), r.tensor(2)));
} else {
return wrap(dispatch__dirichlet_grad(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__embedding_bag(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int64_t mode=0, bool sparse=False, Tensor? per_sample_weights=None)",
}, /*traceable=*/true);
ParsedArgs<7> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__embedding_bag(r.tensor(0), r.tensor(1), r.tensor(2), r.toBool(3), r.toInt64(4), r.toBool(5), r.tensor(6)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__empty_affine_quantized(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_empty_affine_quantized(IntArrayRef size, *, double scale=1, int64_t zero_point=0, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
}, /*traceable=*/true);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
auto size = r.intlist(0);
auto scale = r.toDouble(1);
auto zero_point = r.toInt64(2);
auto dtype = r.scalartype(3);
auto device = r.device(5);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(4).layout)
.requires_grad(r.toBool(7))
.pinned_memory(r.toBool(6));
return wrap(dispatch__empty_affine_quantized(size, scale, zero_point, options));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__fft_with_size(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_fft_with_size(Tensor input, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes)",
}, /*traceable=*/true);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__fft_with_size(r.tensor(0), r.toInt64(1), r.toBool(2), r.toBool(3), r.toBool(4), r.intlist(5), r.toBool(6), r.toBool(7), r.intlist(8)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__fused_dropout(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_fused_dropout(Tensor input, double p, Generator generator=None)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__fused_dropout(r.tensor(0), r.toDouble(1), r.generator(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__index_copy_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_index_copy_(Tensor input, int64_t dim, Tensor index, Tensor source)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__index_copy_(r.tensor(0), r.toInt64(1), r.tensor(2), r.tensor(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__log_softmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_log_softmax(Tensor input, int64_t dim, bool half_to_float)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__log_softmax(r.tensor(0), r.toInt64(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__log_softmax_backward_data(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_log_softmax_backward_data(Tensor grad_output, Tensor output, int64_t dim, Tensor input)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__log_softmax_backward_data(r.tensor(0), r.tensor(1), r.toInt64(2), r.tensor(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__lu_with_info(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_lu_with_info(Tensor input, bool pivot=True, bool check_errors=True)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__lu_with_info(r.tensor(0), r.toBool(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__masked_scale(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_masked_scale(Tensor input, Tensor mask, double scale)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__masked_scale(r.tensor(0), r.tensor(1), r.toDouble(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__max(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_max(Tensor input, int64_t dim, bool keepdim=False, *, TensorList[2] out=None)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
return wrap(dispatch__max(r.tensor(0), r.toInt64(1), r.toBool(2)));
} else {
auto results = r.tensorlist_n<2>(3);
return wrap(dispatch__max(r.tensor(0), r.toInt64(1), r.toBool(2), results[0], results[1]));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
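// For _max here (and _min/_mode below), out= is a list of exactly two
// tensors: r.tensorlist_n<2>(3) unpacks them, presumably as (values,
// indices), to match the pair returned by the dim-reduction.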
static PyObject * THPVariable__min(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_min(Tensor input, int64_t dim, bool keepdim=False, *, TensorList[2] out=None)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
return wrap(dispatch__min(r.tensor(0), r.toInt64(1), r.toBool(2)));
} else {
auto results = r.tensorlist_n<2>(3);
return wrap(dispatch__min(r.tensor(0), r.toInt64(1), r.toBool(2), results[0], results[1]));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__mode(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_mode(Tensor input, int64_t dim=-1, bool keepdim=False, *, TensorList[2] out=None)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
return wrap(dispatch__mode(r.tensor(0), r.toInt64(1), r.toBool(2)));
} else {
auto results = r.tensorlist_n<2>(3);
return wrap(dispatch__mode(r.tensor(0), r.toInt64(1), r.toBool(2), results[0], results[1]));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__multinomial_alias_draw(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_multinomial_alias_draw(Tensor J, Tensor q, int64_t num_samples, *, Generator generator=None)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__multinomial_alias_draw(r.tensor(0), r.tensor(1), r.toInt64(2), r.generator(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__multinomial_alias_setup(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_multinomial_alias_setup(Tensor probs)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__multinomial_alias_setup(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__nnpack_available(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_nnpack_available()",
}, /*traceable=*/false);
ParsedArgs<0> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__nnpack_available());
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__nnpack_spatial_convolution(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, IntArrayRef[2] padding)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__nnpack_spatial_convolution(r.tensor(0), r.tensor(1), r.tensor(2), r.intlist(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__pack_padded_sequence(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__pack_padded_sequence(r.tensor(0), r.tensor(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__pad_packed_sequence(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__pad_packed_sequence(r.tensor(0), r.tensor(1), r.toBool(2), r.scalar(3), r.toInt64(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__per_tensor_affine_qtensor(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_per_tensor_affine_qtensor(Tensor input, double scale, int64_t zero_point)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__per_tensor_affine_qtensor(r.tensor(0), r.toDouble(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__reshape_from_tensor(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_reshape_from_tensor(Tensor input, Tensor shape)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__reshape_from_tensor(r.tensor(0), r.tensor(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__s_where(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_s_where(Tensor condition, Tensor input, Tensor other)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__s_where(r.tensor(0), r.tensor(1), r.tensor(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sample_dirichlet(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sample_dirichlet(Tensor input, Generator generator=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sample_dirichlet(r.tensor(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__shape_as_tensor(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_shape_as_tensor(Tensor input)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__shape_as_tensor(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sobol_engine_draw(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sobol_engine_draw(Tensor quasi, int64_t n, Tensor sobolstate, int64_t dimension, int64_t num_generated, ScalarType? dtype)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sobol_engine_draw(r.tensor(0), r.toInt64(1), r.tensor(2), r.toInt64(3), r.toInt64(4), r.scalartypeOptional(5)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sobol_engine_ff_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sobol_engine_ff_(Tensor input, int64_t n, Tensor sobolstate, int64_t dimension, int64_t num_generated)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sobol_engine_ff_(r.tensor(0), r.toInt64(1), r.tensor(2), r.toInt64(3), r.toInt64(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sobol_engine_initialize_state_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sobol_engine_initialize_state_(Tensor input, int64_t dimension)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sobol_engine_initialize_state_(r.tensor(0), r.toInt64(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sobol_engine_scramble_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sobol_engine_scramble_(Tensor input, Tensor ltm, int64_t dimension)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sobol_engine_scramble_(r.tensor(0), r.tensor(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__softmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_softmax(Tensor input, int64_t dim, bool half_to_float)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__softmax(r.tensor(0), r.toInt64(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__softmax_backward_data(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_softmax_backward_data(Tensor grad_output, Tensor output, int64_t dim, Tensor input)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__softmax_backward_data(r.tensor(0), r.tensor(1), r.toInt64(2), r.tensor(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sparse_addmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sparse_addmm(Tensor input, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sparse_addmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sparse_mm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sparse_mm(Tensor sparse, Tensor dense)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sparse_mm(r.tensor(0), r.tensor(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__sparse_sum(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_sparse_sum(Tensor input)",
"_sparse_sum(Tensor input, *, ScalarType dtype)",
"_sparse_sum(Tensor input, IntArrayRef[1] dim)",
"_sparse_sum(Tensor input, IntArrayRef[1] dim, *, ScalarType dtype)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__sparse_sum(r.tensor(0)));
} else if (r.idx == 1) {
return wrap(dispatch__sparse_sum(r.tensor(0), r.scalartype(1)));
} else if (r.idx == 2) {
return wrap(dispatch__sparse_sum(r.tensor(0), r.intlist(1)));
} else if (r.idx == 3) {
return wrap(dispatch__sparse_sum(r.tensor(0), r.intlist(1), r.scalartype(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__standard_gamma(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_standard_gamma(Tensor input, Generator generator=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__standard_gamma(r.tensor(0), r.generator(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__standard_gamma_grad(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_standard_gamma_grad(Tensor input, Tensor output)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__standard_gamma_grad(r.tensor(0), r.tensor(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__std(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_std(Tensor input, bool unbiased=True)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__std(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__trilinear(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_trilinear(Tensor i1, Tensor i2, Tensor i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim=1)",
}, /*traceable=*/true);
ParsedArgs<8> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__trilinear(r.tensor(0), r.tensor(1), r.tensor(2), r.intlist(3), r.intlist(4), r.intlist(5), r.intlist(6), r.toInt64(7)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__unique(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_unique(Tensor input, bool sorted=True, bool return_inverse=False)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__unique(r.tensor(0), r.toBool(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__unique2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_unique2(Tensor input, bool sorted=True, bool return_inverse=False, bool return_counts=False)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__unique2(r.tensor(0), r.toBool(1), r.toBool(2), r.toBool(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__var(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_var(Tensor input, bool unbiased=True)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__var(r.tensor(0), r.toBool(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__weight_norm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_weight_norm(Tensor v, Tensor g, int64_t dim=0)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__weight_norm(r.tensor(0), r.tensor(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable__weight_norm_cuda_interface(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"_weight_norm_cuda_interface(Tensor v, Tensor g, int64_t dim=0)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch__weight_norm_cuda_interface(r.tensor(0), r.tensor(1), r.toInt64(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_abs(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"abs(Tensor input, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
return wrap(dispatch_abs(r.tensor(0)));
} else {
return wrap(dispatch_abs(r.tensor(0), r.tensor(1)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_abs_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"abs_(Tensor input)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_abs_(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_acos(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"acos(Tensor input, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
return wrap(dispatch_acos(r.tensor(0)));
} else {
return wrap(dispatch_acos(r.tensor(0), r.tensor(1)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_acos_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"acos_(Tensor input)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_acos_(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_adaptive_avg_pool1d(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"adaptive_avg_pool1d(Tensor input, IntArrayRef[1] output_size)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_adaptive_avg_pool1d(r.tensor(0), r.intlist(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_adaptive_max_pool1d(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"adaptive_max_pool1d(Tensor input, IntArrayRef[1] output_size)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_adaptive_max_pool1d(r.tensor(0), r.intlist(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
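// Overloads marked |deprecated keep the legacy positional-alpha calling
// convention alive, e.g. torch.add(x, 2, y) versus the current
// torch.add(x, y, alpha=2); both compute x + alpha * y. Signatures are
// tried in order, and r.idx reports which one matched.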
static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"add(Tensor input, Scalar alpha, Tensor other, *, Tensor out=None)|deprecated",
"add(Tensor input, Tensor other, *, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(3)) {
return wrap(dispatch_add(r.tensor(0), r.scalar(1), r.tensor(2)));
} else {
return wrap(dispatch_add(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3)));
}
} else if (r.idx == 1) {
if (r.isNone(3)) {
return wrap(dispatch_add(r.tensor(0), r.tensor(1), r.scalar(2)));
} else {
return wrap(dispatch_add(r.tensor(0), r.tensor(1), r.scalar(2), r.tensor(3)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
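// addbmm reduces a batch of matrix products into one matrix:
//   out = beta * input + alpha * sum_i (batch1[i] @ batch2[i])
// with batch1 of shape (b, n, m) and batch2 of shape (b, m, p) giving an
// (n, p) result (contrast with baddbmm further down, which keeps the
// batch dimension).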
static PyObject * THPVariable_addbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addbmm(Scalar beta, Tensor input, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor out=None)|deprecated",
"addbmm(Scalar beta, Tensor input, Tensor batch1, Tensor batch2, *, Tensor out=None)|deprecated",
"addbmm(Tensor input, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch_addbmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else {
return wrap(dispatch_addbmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4), r.tensor(5)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addbmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addbmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 2) {
if (r.isNone(5)) {
return wrap(dispatch_addbmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch_addbmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
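// addcdiv and addcmul are the fused elementwise updates
//   out = input + value * tensor1 / tensor2   (addcdiv)
//   out = input + value * tensor1 * tensor2   (addcmul)
// with the deprecated overload taking value positionally.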
static PyObject * THPVariable_addcdiv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcdiv(Tensor input, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor out=None)|deprecated",
"addcdiv(Tensor input, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(4)) {
return wrap(dispatch_addcdiv(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addcdiv(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addcdiv(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3)));
} else {
return wrap(dispatch_addcdiv(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.tensor(4)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_addcmul(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addcmul(Tensor input, Scalar value, Tensor tensor1, Tensor tensor2, *, Tensor out=None)|deprecated",
"addcmul(Tensor input, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(4)) {
return wrap(dispatch_addcmul(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addcmul(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addcmul(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3)));
} else {
return wrap(dispatch_addcmul(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.tensor(4)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
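// addmm: out = beta * input + alpha * (mat1 @ mat2), the fused
// matrix-multiply-accumulate commonly used to fold a bias add into a
// matmul.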
static PyObject * THPVariable_addmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmm(Scalar beta, Tensor input, Scalar alpha, Tensor mat1, Tensor mat2, *, Tensor out=None)|deprecated",
"addmm(Scalar beta, Tensor input, Tensor mat1, Tensor mat2, *, Tensor out=None)|deprecated",
"addmm(Tensor input, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch_addmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else {
return wrap(dispatch_addmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4), r.tensor(5)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 2) {
if (r.isNone(5)) {
return wrap(dispatch_addmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch_addmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
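// addmv is the matrix-vector analogue:
//   out = beta * input + alpha * (mat @ vec).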
static PyObject * THPVariable_addmv(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmv(Scalar beta, Tensor input, Scalar alpha, Tensor mat, Tensor vec, *, Tensor out=None)|deprecated",
"addmv(Scalar beta, Tensor input, Tensor mat, Tensor vec, *, Tensor out=None)|deprecated",
"addmv(Tensor input, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch_addmv(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else {
return wrap(dispatch_addmv(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4), r.tensor(5)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addmv(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addmv(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 2) {
if (r.isNone(5)) {
return wrap(dispatch_addmv(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch_addmv(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
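// In-place variants (trailing underscore) mutate and return their first
// tensor argument, so they take no out= parameter and need no isNone
// branch.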
static PyObject * THPVariable_addmv_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addmv_(Scalar beta, Tensor input, Scalar alpha, Tensor mat, Tensor vec)|deprecated",
"addmv_(Scalar beta, Tensor input, Tensor mat, Tensor vec)|deprecated",
"addmv_(Tensor input, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_addmv_(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else if (r.idx == 1) {
return wrap(dispatch_addmv_(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else if (r.idx == 2) {
return wrap(dispatch_addmv_(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
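// addr accumulates an outer product:
//   out = beta * input + alpha * outer(vec1, vec2),
// i.e. alpha * vec1.unsqueeze(1) * vec2.unsqueeze(0) added to input.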
static PyObject * THPVariable_addr(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"addr(Scalar beta, Tensor input, Scalar alpha, Tensor vec1, Tensor vec2, *, Tensor out=None)|deprecated",
"addr(Scalar beta, Tensor input, Tensor vec1, Tensor vec2, *, Tensor out=None)|deprecated",
"addr(Tensor input, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch_addr(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else {
return wrap(dispatch_addr(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4), r.tensor(5)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_addr(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_addr(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 2) {
if (r.isNone(5)) {
return wrap(dispatch_addr(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch_addr(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_affine_grid_generator(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"affine_grid_generator(Tensor theta, IntArrayRef size)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_affine_grid_generator(r.tensor(0), r.intlist(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
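// Reductions like all/any expose two overloads: a full reduction over
// every element, and a dim-reduction with keepdim and an optional out
// tensor, e.g. torch.any(mask, 1, True) at the Python level.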
static PyObject * THPVariable_all(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"all(Tensor input)",
"all(Tensor input, int64_t dim, bool keepdim=False, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_all(r.tensor(0)));
} else if (r.idx == 1) {
if (r.isNone(3)) {
return wrap(dispatch_all(r.tensor(0), r.toInt64(1), r.toBool(2)));
} else {
return wrap(dispatch_all(r.tensor(0), r.toInt64(1), r.toBool(2), r.tensor(3)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
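// allclose returns a Python bool rather than a Tensor, which is
// presumably why it is generated with /*traceable=*/false: the tracer
// cannot record a non-Tensor result. Elementwise it checks
//   |input - other| <= atol + rtol * |other|.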
static PyObject * THPVariable_allclose(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"allclose(Tensor input, Tensor other, double rtol=1e-05, double atol=1e-08, bool equal_nan=False)",
}, /*traceable=*/false);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_allclose(r.tensor(0), r.tensor(1), r.toDouble(2), r.toDouble(3), r.toBool(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_alpha_dropout(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"alpha_dropout(Tensor input, double p, bool train)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_alpha_dropout(r.tensor(0), r.toDouble(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_alpha_dropout_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"alpha_dropout_(Tensor input, double p, bool train)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_alpha_dropout_(r.tensor(0), r.toDouble(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_any(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"any(Tensor input)",
"any(Tensor input, int64_t dim, bool keepdim=False, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_any(r.tensor(0)));
} else if (r.idx == 1) {
if (r.isNone(3)) {
return wrap(dispatch_any(r.tensor(0), r.toInt64(1), r.toBool(2)));
} else {
return wrap(dispatch_any(r.tensor(0), r.toInt64(1), r.toBool(2), r.tensor(3)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
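// argmax/argmin take an optional dim (int64_t? defaulting to
// c10::nullopt); r.toInt64Optional yields a c10::optional<int64_t>, and
// passing dim=None from Python reduces over the flattened input.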
static PyObject * THPVariable_argmax(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argmax(Tensor input, int64_t? dim=c10::nullopt, bool keepdim=False)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argmax(r.tensor(0), r.toInt64Optional(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_argmin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argmin(Tensor input, int64_t? dim=c10::nullopt, bool keepdim=False)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argmin(r.tensor(0), r.toInt64Optional(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_argsort(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"argsort(Tensor input, int64_t dim=-1, bool descending=False)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_argsort(r.tensor(0), r.toInt64(1), r.toBool(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_as_strided(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"as_strided(Tensor input, IntArrayRef size, IntArrayRef stride, int64_t? storage_offset=c10::nullopt)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_as_strided(r.tensor(0), r.intlist(1), r.intlist(2), r.toInt64Optional(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_as_strided_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"as_strided_(Tensor input, IntArrayRef size, IntArrayRef stride, int64_t? storage_offset=c10::nullopt)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_as_strided_(r.tensor(0), r.intlist(1), r.intlist(2), r.toInt64Optional(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_asin(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"asin(Tensor input, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
return wrap(dispatch_asin(r.tensor(0)));
} else {
return wrap(dispatch_asin(r.tensor(0), r.tensor(1)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_asin_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"asin_(Tensor input)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_asin_(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"atan(Tensor input, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
return wrap(dispatch_atan(r.tensor(0)));
} else {
return wrap(dispatch_atan(r.tensor(0), r.tensor(1)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan2(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"atan2(Tensor input, Tensor other, *, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(2)) {
return wrap(dispatch_atan2(r.tensor(0), r.tensor(1)));
} else {
return wrap(dispatch_atan2(r.tensor(0), r.tensor(1), r.tensor(2)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_atan_(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"atan_(Tensor input)",
}, /*traceable=*/true);
ParsedArgs<1> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_atan_(r.tensor(0)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
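// IntArrayRef[1] parameters accept either a list or a single int that
// the parser expands to the requested length, so avg_pool1d(x, 3) and
// avg_pool1d(x, [3]) parse identically. stride=None falls back to
// kernel_size, as usual for the pooling ops.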
static PyObject * THPVariable_avg_pool1d(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"avg_pool1d(Tensor input, IntArrayRef[1] kernel_size, IntArrayRef[1] stride=None, IntArrayRef[1] padding=0, bool ceil_mode=False, bool count_include_pad=True)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_avg_pool1d(r.tensor(0), r.intlist(1), r.intlist(2), r.intlist(3), r.toBool(4), r.toBool(5)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_baddbmm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"baddbmm(Scalar beta, Tensor input, Scalar alpha, Tensor batch1, Tensor batch2, *, Tensor out=None)|deprecated",
"baddbmm(Scalar beta, Tensor input, Tensor batch1, Tensor batch2, *, Tensor out=None)|deprecated",
"baddbmm(Tensor input, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(5)) {
return wrap(dispatch_baddbmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4)));
} else {
return wrap(dispatch_baddbmm(r.scalar(0), r.tensor(1), r.scalar(2), r.tensor(3), r.tensor(4), r.tensor(5)));
}
} else if (r.idx == 1) {
if (r.isNone(4)) {
return wrap(dispatch_baddbmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3)));
} else {
return wrap(dispatch_baddbmm(r.scalar(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4)));
}
} else if (r.idx == 2) {
if (r.isNone(5)) {
return wrap(dispatch_baddbmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4)));
} else {
return wrap(dispatch_baddbmm(r.tensor(0), r.tensor(1), r.tensor(2), r.scalar(3), r.scalar(4), r.tensor(5)));
}
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
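// Factory functions assemble a TensorOptions from the trailing keyword
// arguments (dtype, layout, device, pin_memory, requires_grad) instead
// of dispatching on an input tensor; note the argument indices shift by
// one between the two overloads because of the extra periodic flag.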
static PyObject * THPVariable_bartlett_window(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bartlett_window(int64_t window_length, *, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
"bartlett_window(int64_t window_length, bool periodic, *, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)",
}, /*traceable=*/true);
  ParsedArgs<7> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
auto window_length = r.toInt64(0);
auto dtype = r.scalartype(1);
auto device = r.device(3);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(2).layout)
.requires_grad(r.toBool(5))
.pinned_memory(r.toBool(4));
return wrap(dispatch_bartlett_window(window_length, options));
} else if (r.idx == 1) {
auto window_length = r.toInt64(0);
auto periodic = r.toBool(1);
auto dtype = r.scalartype(2);
auto device = r.device(4);
const auto options = TensorOptions()
.dtype(dtype)
.device(device)
.layout(r.layout(3).layout)
.requires_grad(r.toBool(6))
.pinned_memory(r.toBool(5));
return wrap(dispatch_bartlett_window(window_length, periodic, options));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
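// In the batch_norm family, Tensor? parameters (weight, bias, running
// stats) arrive as undefined at::Tensor handles when the caller passes
// None; the underlying ATen kernels treat an undefined tensor as absent.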
static PyObject * THPVariable_batch_norm(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, double momentum, double eps, bool cudnn_enabled)",
}, /*traceable=*/true);
ParsedArgs<9> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4), r.toBool(5), r.toDouble(6), r.toDouble(7), r.toBool(8)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_backward_elemt(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu)",
}, /*traceable=*/true);
ParsedArgs<7> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_backward_elemt(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4), r.tensor(5), r.tensor(6)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_backward_reduce(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, bool input_g, bool weight_g, bool bias_g)",
}, /*traceable=*/true);
ParsedArgs<7> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_backward_reduce(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.toBool(4), r.toBool(5), r.toBool(6)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_elemt(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, double eps)",
}, /*traceable=*/true);
ParsedArgs<6> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_elemt(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4), r.toDouble(5)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_gather_stats(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, double momentum, double eps, int64_t count)",
}, /*traceable=*/true);
ParsedArgs<8> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_gather_stats(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.tensor(4), r.toDouble(5), r.toDouble(6), r.toInt64(7)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_stats(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_stats(Tensor input, double eps)",
}, /*traceable=*/true);
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_stats(r.tensor(0), r.toDouble(1)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_batch_norm_update_stats(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, double momentum)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_batch_norm_update_stats(r.tensor(0), r.tensor(1), r.tensor(2), r.toDouble(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
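// bernoulli samples either from per-element probabilities taken from
// input itself or from a single scalar probability p; the optional
// Generator selects the RNG stream, e.g.
// torch.bernoulli(probs, generator=g).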
static PyObject * THPVariable_bernoulli(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bernoulli(Tensor input, *, Generator generator=None, Tensor out=None)",
"bernoulli(Tensor input, double p, *, Generator generator=None)",
}, /*traceable=*/true);
ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(2)) {
return wrap(dispatch_bernoulli(r.tensor(0), r.generator(1)));
} else {
return wrap(dispatch_bernoulli(r.tensor(0), r.generator(1), r.tensor(2)));
}
} else if (r.idx == 1) {
return wrap(dispatch_bernoulli(r.tensor(0), r.toDouble(1), r.generator(2)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bilinear(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias)",
}, /*traceable=*/true);
ParsedArgs<4> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_bilinear(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_binary_cross_entropy_with_logits(PyObject* self_, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"binary_cross_entropy_with_logits(Tensor input, Tensor target, Tensor? weight, Tensor? pos_weight, int64_t reduction)",
}, /*traceable=*/true);
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
return wrap(dispatch_binary_cross_entropy_with_logits(r.tensor(0), r.tensor(1), r.tensor(2), r.tensor(3), r.toInt64(4)));
}
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bincount(PyObject* self_, PyObject* args, PyObject* kwargs)
{