@ngoldbaum · Created February 13, 2020 02:56
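The diff below, against PyTorch's generated python_nn_functions.cpp, adds __torch_function__ dispatch to the torch._C._nn bindings: forward declarations, the nn_functions method table, and initNNFunctions move to the top of the file, and every generated binding now checks _r.has_torch_function() right after argument parsing, routing overloaded calls through handle_torch_function with the new torch._C._nn module object and the "torch.nn" namespace string. As a minimal sketch of what this enables from Python, assuming the three-argument __torch_function__ signature this generation of the protocol calls with (later PyTorch releases pass an extra types argument and expect a classmethod), and with ScalarTensor being a made-up name:

    import torch
    import torch.nn.functional as F

    class ScalarTensor(object):
        """Hypothetical duck-typed wrapper, used only to illustrate dispatch."""
        def __init__(self, value):
            self._t = torch.as_tensor(value)

        def __torch_function__(self, func, args=(), kwargs=None):
            # The generated bindings call this as (func, args, kwargs) and
            # use the first result that is not NotImplemented.
            if kwargs is None:
                kwargs = {}
            args = tuple(a._t if isinstance(a, ScalarTensor) else a
                         for a in args)
            print('intercepted', func.__name__)
            return func(*args, **kwargs)

    # With this patch, torch.nn entry points such as F.gelu also honor the
    # protocol instead of rejecting the non-Tensor argument outright.
    out = F.gelu(ScalarTensor([-1.0, 0.0, 1.0]))
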
--- python_nn_functions_old.cpp 2020-02-12 20:49:49.470685646 -0600
+++ python_nn_functions_new.cpp 2020-02-12 20:50:21.507428604 -0600
@@ -53,6 +53,160 @@
END_HANDLE_TH_ERRORS
}
+// generated forward declarations start here
+
+static PyObject * THPVariable_adaptive_avg_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_adaptive_avg_pool3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_adaptive_max_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_adaptive_max_pool3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_avg_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_avg_pool3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_binary_cross_entropy(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_col2im(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_elu(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_elu_(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_fractional_max_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_fractional_max_pool3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_gelu(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_glu(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_hardtanh(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_hardtanh_(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_im2col(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_l1_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_leaky_relu(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_leaky_relu_(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_linear(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_log_sigmoid(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_max_pool2d_with_indices(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_max_pool3d_with_indices(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_max_unpool2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_max_unpool3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_mkldnn_linear(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_mkldnn_reorder_conv2d_weight(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_mse_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_multi_margin_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_multilabel_margin_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_nll_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_nll_loss2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_one_hot(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_reflection_pad1d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_reflection_pad2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_replication_pad1d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_replication_pad2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_replication_pad3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_rrelu_with_noise(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_rrelu_with_noise_(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_slow_conv3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_slow_conv_dilated2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_slow_conv_dilated3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_slow_conv_transpose2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_slow_conv_transpose3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_smooth_l1_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_soft_margin_loss(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_softplus(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_softshrink(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_thnn_conv2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_thnn_conv_depthwise2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_bicubic2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_bilinear2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_linear1d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_nearest1d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_nearest2d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_nearest3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+static PyObject * THPVariable_upsample_trilinear3d(PyObject* self_, PyObject* args, PyObject* kwargs);
+
+static PyMethodDef nn_functions[] = {
+ {"_parse_to", (PyCFunction)(void(*)(void))THPVariable__parse_to, METH_VARARGS | METH_KEYWORDS, nullptr},
+ {"adaptive_avg_pool2d", (PyCFunction)(void(*)(void))THPVariable_adaptive_avg_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"adaptive_avg_pool3d", (PyCFunction)(void(*)(void))THPVariable_adaptive_avg_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"adaptive_max_pool2d", (PyCFunction)(void(*)(void))THPVariable_adaptive_max_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"adaptive_max_pool3d", (PyCFunction)(void(*)(void))THPVariable_adaptive_max_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"avg_pool2d", (PyCFunction)(void(*)(void))THPVariable_avg_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"avg_pool3d", (PyCFunction)(void(*)(void))THPVariable_avg_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"binary_cross_entropy", (PyCFunction)(void(*)(void))THPVariable_binary_cross_entropy, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"col2im", (PyCFunction)(void(*)(void))THPVariable_col2im, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"elu", (PyCFunction)(void(*)(void))THPVariable_elu, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"elu_", (PyCFunction)(void(*)(void))THPVariable_elu_, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"fractional_max_pool2d", (PyCFunction)(void(*)(void))THPVariable_fractional_max_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"fractional_max_pool3d", (PyCFunction)(void(*)(void))THPVariable_fractional_max_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"gelu", (PyCFunction)(void(*)(void))THPVariable_gelu, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"glu", (PyCFunction)(void(*)(void))THPVariable_glu, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"hardtanh", (PyCFunction)(void(*)(void))THPVariable_hardtanh, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"hardtanh_", (PyCFunction)(void(*)(void))THPVariable_hardtanh_, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"im2col", (PyCFunction)(void(*)(void))THPVariable_im2col, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"l1_loss", (PyCFunction)(void(*)(void))THPVariable_l1_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"leaky_relu", (PyCFunction)(void(*)(void))THPVariable_leaky_relu, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"leaky_relu_", (PyCFunction)(void(*)(void))THPVariable_leaky_relu_, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"linear", (PyCFunction)(void(*)(void))THPVariable_linear, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"log_sigmoid", (PyCFunction)(void(*)(void))THPVariable_log_sigmoid, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"max_pool2d_with_indices", (PyCFunction)(void(*)(void))THPVariable_max_pool2d_with_indices, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"max_pool3d_with_indices", (PyCFunction)(void(*)(void))THPVariable_max_pool3d_with_indices, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"max_unpool2d", (PyCFunction)(void(*)(void))THPVariable_max_unpool2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"max_unpool3d", (PyCFunction)(void(*)(void))THPVariable_max_unpool3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"mkldnn_linear", (PyCFunction)(void(*)(void))THPVariable_mkldnn_linear, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"mkldnn_reorder_conv2d_weight", (PyCFunction)(void(*)(void))THPVariable_mkldnn_reorder_conv2d_weight, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"mse_loss", (PyCFunction)(void(*)(void))THPVariable_mse_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"multi_margin_loss", (PyCFunction)(void(*)(void))THPVariable_multi_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"multilabel_margin_loss", (PyCFunction)(void(*)(void))THPVariable_multilabel_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"nll_loss", (PyCFunction)(void(*)(void))THPVariable_nll_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"nll_loss2d", (PyCFunction)(void(*)(void))THPVariable_nll_loss2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"one_hot", (PyCFunction)(void(*)(void))THPVariable_one_hot, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"reflection_pad1d", (PyCFunction)(void(*)(void))THPVariable_reflection_pad1d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"reflection_pad2d", (PyCFunction)(void(*)(void))THPVariable_reflection_pad2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"replication_pad1d", (PyCFunction)(void(*)(void))THPVariable_replication_pad1d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"replication_pad2d", (PyCFunction)(void(*)(void))THPVariable_replication_pad2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"replication_pad3d", (PyCFunction)(void(*)(void))THPVariable_replication_pad3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"rrelu_with_noise", (PyCFunction)(void(*)(void))THPVariable_rrelu_with_noise, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"rrelu_with_noise_", (PyCFunction)(void(*)(void))THPVariable_rrelu_with_noise_, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"slow_conv3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"slow_conv_dilated2d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_dilated2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"slow_conv_dilated3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_dilated3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"slow_conv_transpose2d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_transpose2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"slow_conv_transpose3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_transpose3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"smooth_l1_loss", (PyCFunction)(void(*)(void))THPVariable_smooth_l1_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"soft_margin_loss", (PyCFunction)(void(*)(void))THPVariable_soft_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"softplus", (PyCFunction)(void(*)(void))THPVariable_softplus, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"softshrink", (PyCFunction)(void(*)(void))THPVariable_softshrink, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"thnn_conv2d", (PyCFunction)(void(*)(void))THPVariable_thnn_conv2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"thnn_conv_depthwise2d", (PyCFunction)(void(*)(void))THPVariable_thnn_conv_depthwise2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_bicubic2d", (PyCFunction)(void(*)(void))THPVariable_upsample_bicubic2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_bilinear2d", (PyCFunction)(void(*)(void))THPVariable_upsample_bilinear2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_linear1d", (PyCFunction)(void(*)(void))THPVariable_upsample_linear1d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_nearest1d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest1d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_nearest2d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest2d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_nearest3d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"upsample_trilinear3d", (PyCFunction)(void(*)(void))THPVariable_upsample_trilinear3d, METH_VARARGS | METH_KEYWORDS, NULL},
+ {NULL}
+};
+
+static PyObject* THPNNVariableFunctionsModule = NULL;
+
+void initNNFunctions(PyObject* module) {
+#if PY_MAJOR_VERSION == 2
+ PyObject* nn = Py_InitModule("torch._C._nn", nn_functions);
+ Py_XINCREF(nn); // Py_InitModule returns "borrowed" reference
+#else
+ static struct PyModuleDef def = {
+ PyModuleDef_HEAD_INIT,
+ "torch._C._nn",
+ NULL,
+ -1,
+ nn_functions
+ };
+ PyObject* nn = PyModule_Create(&def);
+#endif
+ THPNNVariableFunctionsModule = nn;
+ if (!nn) {
+ throw python_error();
+ }
+ // steals a reference to nn
+ if (PyModule_AddObject(module, "_nn", nn) != 0) {
+ throw python_error();
+ }
+}
+
+// generated methods start here
+
// adaptive_avg_pool2d
static PyObject * THPVariable_adaptive_avg_pool2d(PyObject* self_, PyObject* args, PyObject* kwargs)
{
@@ -63,7 +217,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
auto dispatch_adaptive_avg_pool2d = [](const Tensor & self, IntArrayRef output_size) -> Tensor {
@@ -93,7 +249,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
auto dispatch_adaptive_avg_pool3d = [](const Tensor & self, IntArrayRef output_size) -> Tensor {
@@ -123,7 +281,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
auto dispatch_adaptive_max_pool2d = [](const Tensor & self, IntArrayRef output_size) -> std::tuple<Tensor,Tensor> {
@@ -154,7 +314,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
auto dispatch_adaptive_max_pool3d = [](const Tensor & self, IntArrayRef output_size) -> std::tuple<Tensor,Tensor> {
@@ -185,7 +347,9 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(7)) {
// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
auto dispatch_avg_pool2d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) -> Tensor {
@@ -215,7 +379,9 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(7)) {
// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
auto dispatch_avg_pool3d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) -> Tensor {
@@ -245,7 +411,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
auto dispatch_binary_cross_entropy = [](const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) -> Tensor {
@@ -275,7 +443,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
auto dispatch_col2im = [](const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) -> Tensor {
@@ -305,7 +475,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
auto dispatch_elu = [](const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor {
@@ -335,7 +507,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
auto dispatch_elu_ = [](Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -356,7 +530,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
auto dispatch_fractional_max_pool2d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) -> std::tuple<Tensor,Tensor> {
@@ -387,7 +563,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
auto dispatch_fractional_max_pool3d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) -> std::tuple<Tensor,Tensor> {
@@ -418,7 +596,9 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::gelu(Tensor self) -> Tensor
auto dispatch_gelu = [](const Tensor & self) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -439,7 +619,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::glu(Tensor self, int dim=-1) -> Tensor
auto dispatch_glu = [](const Tensor & self, int64_t dim) -> Tensor {
@@ -469,7 +651,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
auto dispatch_hardtanh = [](const Tensor & self, Scalar min_val, Scalar max_val) -> Tensor {
@@ -499,7 +683,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
auto dispatch_hardtanh_ = [](Tensor self, Scalar min_val, Scalar max_val) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -520,7 +706,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
auto dispatch_im2col = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) -> Tensor {
@@ -550,7 +738,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_l1_loss = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
@@ -580,7 +770,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
auto dispatch_leaky_relu = [](const Tensor & self, Scalar negative_slope) -> Tensor {
@@ -610,7 +802,9 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
auto dispatch_leaky_relu_ = [](Tensor self, Scalar negative_slope) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -631,7 +825,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
auto dispatch_linear = [](const Tensor & input, const Tensor & weight, const Tensor & bias) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -652,7 +848,9 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(1)) {
// aten::log_sigmoid(Tensor self) -> Tensor
auto dispatch_log_sigmoid = [](const Tensor & self) -> Tensor {
@@ -682,7 +880,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
auto dispatch_max_pool2d_with_indices = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> std::tuple<Tensor,Tensor> {
@@ -713,7 +913,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
auto dispatch_max_pool3d_with_indices = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> std::tuple<Tensor,Tensor> {
@@ -744,7 +946,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
auto dispatch_max_unpool2d = [](const Tensor & self, const Tensor & indices, IntArrayRef output_size) -> Tensor {
@@ -774,7 +978,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
auto dispatch_max_unpool3d = [](const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor {
@@ -804,7 +1010,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::mkldnn_linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
auto dispatch_mkldnn_linear = [](const Tensor & input, const Tensor & weight, const Tensor & bias) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -825,7 +1033,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor
auto dispatch_mkldnn_reorder_conv2d_weight = [](const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -846,7 +1056,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_mse_loss = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
@@ -876,7 +1088,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
auto dispatch_multi_margin_loss = [](const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) -> Tensor {
@@ -906,7 +1120,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_multilabel_margin_loss = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
@@ -936,7 +1152,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
auto dispatch_nll_loss = [](const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) -> Tensor {
@@ -966,7 +1184,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
auto dispatch_nll_loss2d = [](const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) -> Tensor {
@@ -996,7 +1216,9 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
auto dispatch_one_hot = [](const Tensor & self, int64_t num_classes) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -1017,7 +1239,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor
auto dispatch_reflection_pad1d = [](const Tensor & self, IntArrayRef padding) -> Tensor {
@@ -1047,7 +1271,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor
auto dispatch_reflection_pad2d = [](const Tensor & self, IntArrayRef padding) -> Tensor {
@@ -1077,7 +1303,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor
auto dispatch_replication_pad1d = [](const Tensor & self, IntArrayRef padding) -> Tensor {
@@ -1107,7 +1335,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor
auto dispatch_replication_pad2d = [](const Tensor & self, IntArrayRef padding) -> Tensor {
@@ -1137,7 +1367,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor
auto dispatch_replication_pad3d = [](const Tensor & self, IntArrayRef padding) -> Tensor {
@@ -1167,7 +1399,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
auto dispatch_rrelu_with_noise = [](const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor {
@@ -1197,7 +1431,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
auto dispatch_rrelu_with_noise_ = [](Tensor self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -1218,7 +1454,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
auto dispatch_slow_conv3d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) -> Tensor {
@@ -1248,7 +1486,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
auto dispatch_slow_conv_dilated2d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -1269,7 +1509,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor
auto dispatch_slow_conv_dilated3d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor {
pybind11::gil_scoped_release no_gil;
@@ -1290,7 +1532,9 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(8)) {
// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
auto dispatch_slow_conv_transpose2d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor {
@@ -1320,7 +1564,9 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(8)) {
// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
auto dispatch_slow_conv_transpose3d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor {
@@ -1350,7 +1596,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_smooth_l1_loss = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
@@ -1380,7 +1628,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_soft_margin_loss = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
@@ -1410,7 +1660,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
auto dispatch_softplus = [](const Tensor & self, Scalar beta, Scalar threshold) -> Tensor {
@@ -1440,7 +1692,9 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(2)) {
// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
auto dispatch_softshrink = [](const Tensor & self, Scalar lambd) -> Tensor {
@@ -1470,7 +1724,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
auto dispatch_thnn_conv2d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) -> Tensor {
@@ -1500,7 +1756,9 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(7)) {
// aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
auto dispatch_thnn_conv_depthwise2d = [](const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor {
@@ -1530,7 +1788,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
auto dispatch_upsample_bicubic2d = [](const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) -> Tensor {
@@ -1560,7 +1820,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
auto dispatch_upsample_bilinear2d = [](const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) -> Tensor {
@@ -1590,7 +1852,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor
auto dispatch_upsample_linear1d = [](const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) -> Tensor {
@@ -1620,7 +1884,9 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(3)) {
// aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor
auto dispatch_upsample_nearest1d = [](const Tensor & self, IntArrayRef output_size, c10::optional<double> scales) -> Tensor {
@@ -1650,7 +1916,9 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(4)) {
// aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
auto dispatch_upsample_nearest2d = [](const Tensor & self, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) -> Tensor {
@@ -1680,7 +1948,9 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(5)) {
// aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
auto dispatch_upsample_nearest3d = [](const Tensor & self, IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) -> Tensor {
@@ -1710,7 +1980,9 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
-
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn");
+ }
if (_r.isNone(6)) {
// aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
auto dispatch_upsample_trilinear3d = [](const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) -> Tensor {
@@ -1730,91 +2002,4 @@
END_HANDLE_TH_ERRORS
}
-static PyMethodDef nn_functions[] = {
- {"_parse_to", (PyCFunction)(void(*)(void))THPVariable__parse_to, METH_VARARGS | METH_KEYWORDS, nullptr},
- {"adaptive_avg_pool2d", (PyCFunction)(void(*)(void))THPVariable_adaptive_avg_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"adaptive_avg_pool3d", (PyCFunction)(void(*)(void))THPVariable_adaptive_avg_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"adaptive_max_pool2d", (PyCFunction)(void(*)(void))THPVariable_adaptive_max_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"adaptive_max_pool3d", (PyCFunction)(void(*)(void))THPVariable_adaptive_max_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"avg_pool2d", (PyCFunction)(void(*)(void))THPVariable_avg_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"avg_pool3d", (PyCFunction)(void(*)(void))THPVariable_avg_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"binary_cross_entropy", (PyCFunction)(void(*)(void))THPVariable_binary_cross_entropy, METH_VARARGS | METH_KEYWORDS, NULL},
- {"col2im", (PyCFunction)(void(*)(void))THPVariable_col2im, METH_VARARGS | METH_KEYWORDS, NULL},
- {"elu", (PyCFunction)(void(*)(void))THPVariable_elu, METH_VARARGS | METH_KEYWORDS, NULL},
- {"elu_", (PyCFunction)(void(*)(void))THPVariable_elu_, METH_VARARGS | METH_KEYWORDS, NULL},
- {"fractional_max_pool2d", (PyCFunction)(void(*)(void))THPVariable_fractional_max_pool2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"fractional_max_pool3d", (PyCFunction)(void(*)(void))THPVariable_fractional_max_pool3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"gelu", (PyCFunction)(void(*)(void))THPVariable_gelu, METH_VARARGS | METH_KEYWORDS, NULL},
- {"glu", (PyCFunction)(void(*)(void))THPVariable_glu, METH_VARARGS | METH_KEYWORDS, NULL},
- {"hardtanh", (PyCFunction)(void(*)(void))THPVariable_hardtanh, METH_VARARGS | METH_KEYWORDS, NULL},
- {"hardtanh_", (PyCFunction)(void(*)(void))THPVariable_hardtanh_, METH_VARARGS | METH_KEYWORDS, NULL},
- {"im2col", (PyCFunction)(void(*)(void))THPVariable_im2col, METH_VARARGS | METH_KEYWORDS, NULL},
- {"l1_loss", (PyCFunction)(void(*)(void))THPVariable_l1_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"leaky_relu", (PyCFunction)(void(*)(void))THPVariable_leaky_relu, METH_VARARGS | METH_KEYWORDS, NULL},
- {"leaky_relu_", (PyCFunction)(void(*)(void))THPVariable_leaky_relu_, METH_VARARGS | METH_KEYWORDS, NULL},
- {"linear", (PyCFunction)(void(*)(void))THPVariable_linear, METH_VARARGS | METH_KEYWORDS, NULL},
- {"log_sigmoid", (PyCFunction)(void(*)(void))THPVariable_log_sigmoid, METH_VARARGS | METH_KEYWORDS, NULL},
- {"max_pool2d_with_indices", (PyCFunction)(void(*)(void))THPVariable_max_pool2d_with_indices, METH_VARARGS | METH_KEYWORDS, NULL},
- {"max_pool3d_with_indices", (PyCFunction)(void(*)(void))THPVariable_max_pool3d_with_indices, METH_VARARGS | METH_KEYWORDS, NULL},
- {"max_unpool2d", (PyCFunction)(void(*)(void))THPVariable_max_unpool2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"max_unpool3d", (PyCFunction)(void(*)(void))THPVariable_max_unpool3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"mkldnn_linear", (PyCFunction)(void(*)(void))THPVariable_mkldnn_linear, METH_VARARGS | METH_KEYWORDS, NULL},
- {"mkldnn_reorder_conv2d_weight", (PyCFunction)(void(*)(void))THPVariable_mkldnn_reorder_conv2d_weight, METH_VARARGS | METH_KEYWORDS, NULL},
- {"mse_loss", (PyCFunction)(void(*)(void))THPVariable_mse_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"multi_margin_loss", (PyCFunction)(void(*)(void))THPVariable_multi_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"multilabel_margin_loss", (PyCFunction)(void(*)(void))THPVariable_multilabel_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"nll_loss", (PyCFunction)(void(*)(void))THPVariable_nll_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"nll_loss2d", (PyCFunction)(void(*)(void))THPVariable_nll_loss2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"one_hot", (PyCFunction)(void(*)(void))THPVariable_one_hot, METH_VARARGS | METH_KEYWORDS, NULL},
- {"reflection_pad1d", (PyCFunction)(void(*)(void))THPVariable_reflection_pad1d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"reflection_pad2d", (PyCFunction)(void(*)(void))THPVariable_reflection_pad2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"replication_pad1d", (PyCFunction)(void(*)(void))THPVariable_replication_pad1d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"replication_pad2d", (PyCFunction)(void(*)(void))THPVariable_replication_pad2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"replication_pad3d", (PyCFunction)(void(*)(void))THPVariable_replication_pad3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"rrelu_with_noise", (PyCFunction)(void(*)(void))THPVariable_rrelu_with_noise, METH_VARARGS | METH_KEYWORDS, NULL},
- {"rrelu_with_noise_", (PyCFunction)(void(*)(void))THPVariable_rrelu_with_noise_, METH_VARARGS | METH_KEYWORDS, NULL},
- {"slow_conv3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"slow_conv_dilated2d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_dilated2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"slow_conv_dilated3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_dilated3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"slow_conv_transpose2d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_transpose2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"slow_conv_transpose3d", (PyCFunction)(void(*)(void))THPVariable_slow_conv_transpose3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"smooth_l1_loss", (PyCFunction)(void(*)(void))THPVariable_smooth_l1_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"soft_margin_loss", (PyCFunction)(void(*)(void))THPVariable_soft_margin_loss, METH_VARARGS | METH_KEYWORDS, NULL},
- {"softplus", (PyCFunction)(void(*)(void))THPVariable_softplus, METH_VARARGS | METH_KEYWORDS, NULL},
- {"softshrink", (PyCFunction)(void(*)(void))THPVariable_softshrink, METH_VARARGS | METH_KEYWORDS, NULL},
- {"thnn_conv2d", (PyCFunction)(void(*)(void))THPVariable_thnn_conv2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"thnn_conv_depthwise2d", (PyCFunction)(void(*)(void))THPVariable_thnn_conv_depthwise2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_bicubic2d", (PyCFunction)(void(*)(void))THPVariable_upsample_bicubic2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_bilinear2d", (PyCFunction)(void(*)(void))THPVariable_upsample_bilinear2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_linear1d", (PyCFunction)(void(*)(void))THPVariable_upsample_linear1d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_nearest1d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest1d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_nearest2d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest2d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_nearest3d", (PyCFunction)(void(*)(void))THPVariable_upsample_nearest3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {"upsample_trilinear3d", (PyCFunction)(void(*)(void))THPVariable_upsample_trilinear3d, METH_VARARGS | METH_KEYWORDS, NULL},
- {NULL}
-};
-
-void initNNFunctions(PyObject* module) {
-#if PY_MAJOR_VERSION == 2
- PyObject* nn = Py_InitModule("torch._C._nn", nn_functions);
- Py_XINCREF(nn); // Py_InitModule returns "borrowed" reference
-#else
- static struct PyModuleDef def = {
- PyModuleDef_HEAD_INIT,
- "torch._C._nn",
- NULL,
- -1,
- nn_functions
- };
- PyObject* nn = PyModule_Create(&def);
-#endif
- if (!nn) {
- throw python_error();
- }
- // steals a reference to nn
- if (PyModule_AddObject(module, "_nn", nn) != 0) {
- throw python_error();
- }
-}
-
}} // namespace torch::autograd
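The second file's diff removes the handle_torch_function definition (and its explanatory comment) from this generated file in favor of a shared helper that takes the relevant module object plus a namespace string, and it instantiates THPVariableFunctions via PyType_GenericNew so there is a concrete Python object to dispatch against. As a reading aid, here is a pure-Python sketch of the dispatch loop the deleted comment block below describes; the name handle_torch_function_sketch is made up, and torch._overrides._implement_torch_function is the real pure-Python counterpart:

    def handle_torch_function_sketch(torch_api_function, overloaded_args,
                                     args, kwargs):
        # Call __torch_function__ on each overloaded argument in order of
        # precedence; return the first result that is not NotImplemented.
        # An exception raised by an implementation propagates, matching the
        # C++ error path.
        for arg in overloaded_args:
            result = arg.__torch_function__(torch_api_function, args, kwargs)
            if result is not NotImplemented:
                return result
        # Every implementation returned NotImplemented: raise TypeError
        # naming the offending types, as the removed C++ code does.
        raise TypeError(
            "no implementation found for 'torch.{}' on types that implement "
            "__torch_function__: [{}]".format(
                torch_api_function.__name__,
                ", ".join(type(a).__name__ for a in overloaded_args)))
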
--- python_torch_functions_old.cpp 2020-02-12 20:49:34.162332605 -0600
+++ python_torch_functions_new.cpp 2020-02-12 20:50:42.551919554 -0600
@@ -1413,88 +1413,19 @@
0 /* tp_new */
};
+static PyObject* THPVariableFunctionsModule = NULL;
+
void initTorchFunctions(PyObject* module) {
if (PyType_Ready(&THPVariableFunctions) < 0) {
throw python_error();
}
Py_INCREF(&THPVariableFunctions);
- if (PyModule_AddObject(module, "_VariableFunctions", (PyObject*)&THPVariableFunctions) < 0) {
- throw python_error();
- }
-}
-
-/*
- *
- * Calls __torch_function__ on the overloaded arguments to a torch API
- * function in order of precedence, returning the first result that is
- * not NotImplemented. If all arguments return NotImplemented, raises a
- * TypeError.
- *
- * Assumes overloaded_args has at least one entry. All entries must have
- * a __torch_function__ attribute that resolves to a callable that
- * accepts a torch API function, arguments, and keyword arguments for
- * the torch API function.
- *
- * It is sufficient to call PythonArgs::has_torch_function before
- * calling this function to verify that there are valid arguments
- * present. If that is not done then special care must be taken to
- * ensure there are arguments that are overloaded with
- * __torch_function__.
- *
- * See torch._overrides._implement_torch_function for the equivalent
- * code in the pure-python implementation.
- *
- * 'r' is a parsed PythonArgs instance, returned from
- * PythonArgParser::parse.
- *
- * 'args' is a reference to the python tuple of arguments to the torch
- * API function.
- *
- * 'kwargs' is a reference to the python dict of keyword arguments to
- * the torch API function.
- *
- * 'torch_api' is a reference to python torch API namespace.
- *
- */
-
-PyObject* handle_torch_function(PythonArgs &r, PyObject* args, PyObject* kwargs, PyTypeObject &torch_api) {
- py::object torch_api_function = PyObject_FastGetAttrString((PyObject*)&torch_api, const_cast<char*>(r.get_func_name().data()));
- TORCH_INTERNAL_ASSERT(torch_api_function.ptr() != NULL, "torch API function must exist");
- py::object ret;
- for (auto &arg : r.signature.overloaded_args) {
- py::object torch_function = PyObject_FastGetAttrString(arg.ptr(), "__torch_function__");
- ret = py::reinterpret_steal<py::object>(PyObject_CallFunctionObjArgs(torch_function.ptr(), torch_api_function.ptr(), args, kwargs, NULL));
- if (ret.ptr() != Py_NotImplemented) {
- // Return the reference to the result. This also covers the case where ret
- // is NULL and __torch_function__ raised an exception, which we throw below
- break;
- }
- }
- if (ret.ptr() == nullptr) {
- // if an exception occurred in a user's implementation of
- // __torch_function__, throw it
- throw python_error();
- }
- else if (ret.ptr() == Py_NotImplemented) {
- // all __torch_function__ implementations in overloaded_args
- // returned NotImplemented, so we raise a TypeError.
- std::stringstream ss;
- ss << "no implementation found for 'torch." << r.get_func_name()
- << "' on types that implement __torch_function__: [";
- for (auto &arg : r.signature.overloaded_args) {
- ss << arg.ptr()->ob_type->tp_name;
- if (!arg.is(r.signature.overloaded_args.back())) {
- ss << ", ";
- }
- else {
- ss << "]";
- }
- }
- const std::string& tmp = ss.str();
- PyErr_SetString(PyExc_TypeError, tmp.c_str());
+ // PyType_GenericNew returns a new reference
+ THPVariableFunctionsModule = PyType_GenericNew(&THPVariableFunctions, Py_None, Py_None);
+ // PyModule_AddObject steals a reference
+ if (PyModule_AddObject(module, "_VariableFunctions", THPVariableFunctionsModule) < 0) {
throw python_error();
}
- return ret.release().ptr();
}
// generated methods start here
@@ -1511,8 +1442,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -1548,8 +1479,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -1585,8 +1516,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -1622,8 +1553,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -1659,8 +1590,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -1694,8 +1625,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
auto dispatch__adaptive_avg_pool2d = [](const Tensor & self, IntArrayRef output_size) -> Tensor {
@@ -1717,8 +1648,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(5)) {
// aten::_addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
@@ -1749,8 +1680,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
auto dispatch__addr_ = [](Tensor self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) -> Tensor {
@@ -1772,8 +1703,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_baddbmm_mkl_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
auto dispatch__baddbmm_mkl_ = [](Tensor self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) -> Tensor {
@@ -1795,8 +1726,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
auto dispatch__batch_norm_impl_index = [](const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) -> std::tuple<Tensor,Tensor,Tensor,Tensor,int64_t> {
@@ -1818,8 +1749,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Byte = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1841,8 +1772,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Char = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1864,8 +1795,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Double = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1887,8 +1818,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Float = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1910,8 +1841,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Half = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1933,8 +1864,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Int = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1956,8 +1887,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Long = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -1979,8 +1910,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
auto dispatch__cast_Short = [](const Tensor & self, bool non_blocking) -> Tensor {
@@ -2002,8 +1933,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::_cat(Tensor[] tensors, int dim=0) -> Tensor
@@ -2034,8 +1965,8 @@
ParsedArgs<12> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
auto dispatch__convolution = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor {
@@ -2057,8 +1988,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor
auto dispatch__convolution_nogroup = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding) -> Tensor {
@@ -2080,8 +2011,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
auto dispatch__copy_from = [](const Tensor & self, const Tensor & dst, bool non_blocking) -> Tensor {
@@ -2103,8 +2034,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
auto dispatch__ctc_loss = [](const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity) -> std::tuple<Tensor,Tensor> {
@@ -2126,8 +2057,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
auto dispatch__cudnn_ctc_loss = [](const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) -> std::tuple<Tensor,Tensor> {
@@ -2149,8 +2080,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor
const auto options = TensorOptions()
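
Factory-style operators such as _cudnn_init_dropout_state here (and _empty_affine_quantized further down) add one extra step: the trailing dtype/layout/device/pin_memory arguments are folded into a TensorOptions before dispatch. A minimal sketch of that construction; the argument indices and accessor spellings are illustrative guesses at the parser's conventions, not copied from the file:

// Sketch only: positions follow the signature
// (dropout, train, dropout_seed, dtype, layout, device, pin_memory).
const auto options = TensorOptions()
    .dtype(_r.scalartype(3))
    .layout(_r.layout(4).layout)
    .device(_r.device(5))
    .pinned_memory(_r.toBool(6));
auto dispatch__cudnn_init_dropout_state = [](double dropout, bool train, int64_t dropout_seed, const TensorOptions & options) -> Tensor {
  pybind11::gil_scoped_release no_gil;
  return torch::_cudnn_init_dropout_state(dropout, train, dropout_seed, options);
};
return wrap(dispatch__cudnn_init_dropout_state(_r.toDouble(0), _r.toBool(1), _r.toInt64(2), options));
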
@@ -2179,8 +2110,8 @@
ParsedArgs<15> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
auto dispatch__cudnn_rnn = [](const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state) -> std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> {
@@ -2202,8 +2133,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
auto dispatch__cudnn_rnn_flatten_weight = [](TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional) -> Tensor {
@@ -2225,8 +2156,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cufft_clear_plan_cache(int device_index) -> ()
auto dispatch__cufft_clear_plan_cache = [](int64_t device_index) -> void {
@@ -2249,8 +2180,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cufft_get_plan_cache_max_size(int device_index) -> int
auto dispatch__cufft_get_plan_cache_max_size = [](int64_t device_index) -> int64_t {
@@ -2272,8 +2203,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cufft_get_plan_cache_size(int device_index) -> int
auto dispatch__cufft_get_plan_cache_size = [](int64_t device_index) -> int64_t {
@@ -2295,8 +2226,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
auto dispatch__cufft_set_plan_cache_max_size = [](int64_t device_index, int64_t max_size) -> void {
@@ -2319,8 +2250,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_debug_has_internal_overlap(Tensor self) -> int
auto dispatch__debug_has_internal_overlap = [](const Tensor & self) -> int64_t {
@@ -2342,8 +2273,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_dim_arange(Tensor like, int dim) -> Tensor
auto dispatch__dim_arange = [](const Tensor & like, int64_t dim) -> Tensor {
@@ -2365,8 +2296,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
auto dispatch__dirichlet_grad = [](const Tensor & x, const Tensor & alpha, const Tensor & total) -> Tensor {
@@ -2388,8 +2319,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
auto dispatch__embedding_bag = [](const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights, bool include_last_offset) -> std::tuple<Tensor,Tensor,Tensor,Tensor> {
@@ -2411,8 +2342,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
const auto options = TensorOptions()
@@ -2441,8 +2372,8 @@
ParsedArgs<10> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
const auto options = TensorOptions()
@@ -2471,8 +2402,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor
auto dispatch__fft_with_size = [](const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes) -> Tensor {
@@ -2494,8 +2425,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
auto dispatch__fused_dropout = [](const Tensor & self, double p, Generator * generator) -> std::tuple<Tensor,Tensor> {
@@ -2517,8 +2448,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
auto dispatch__has_compatible_shallow_copy_type = [](const Tensor & self, const Tensor & from) -> bool {
@@ -2540,8 +2471,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
auto dispatch__index_copy_ = [](Tensor self, int64_t dim, const Tensor & index, const Tensor & source) -> Tensor {
@@ -2563,8 +2494,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
auto dispatch__index_put_impl_ = [](Tensor self, TensorList indices, const Tensor & values, bool accumulate, bool unsafe) -> Tensor {
@@ -2586,8 +2517,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
auto dispatch__log_softmax = [](const Tensor & self, int64_t dim, bool half_to_float) -> Tensor {
@@ -2609,8 +2540,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
auto dispatch__log_softmax_backward_data = [](const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) -> Tensor {
@@ -2632,8 +2563,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
auto dispatch__lu_solve_helper = [](const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) -> Tensor {
@@ -2655,8 +2586,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor)
auto dispatch__lu_with_info = [](const Tensor & self, bool pivot, bool check_errors) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -2678,8 +2609,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
auto dispatch__make_per_channel_quantized_tensor = [](const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis) -> Tensor {
@@ -2701,8 +2632,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
auto dispatch__make_per_tensor_quantized_tensor = [](const Tensor & self, double scale, int64_t zero_point) -> Tensor {
@@ -2724,8 +2655,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
auto dispatch__masked_scale = [](const Tensor & self, const Tensor & mask, double scale) -> Tensor {
@@ -2747,8 +2678,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::_max(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
@@ -2780,8 +2711,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::_min(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
@@ -2813,8 +2744,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
auto dispatch__mkldnn_reshape = [](const Tensor & self, IntArrayRef shape) -> Tensor {
@@ -2836,8 +2767,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
auto dispatch__mkldnn_transpose = [](const Tensor & self, int64_t dim0, int64_t dim1) -> Tensor {
@@ -2859,8 +2790,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
auto dispatch__mkldnn_transpose_ = [](Tensor self, int64_t dim0, int64_t dim1) -> Tensor {
@@ -2882,8 +2813,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::_mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor)
@@ -2915,8 +2846,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_multinomial_alias_draw(Tensor J, Tensor q, int num_samples, *, Generator? generator=None) -> Tensor
auto dispatch__multinomial_alias_draw = [](const Tensor & J, const Tensor & q, int64_t num_samples, Generator * generator) -> Tensor {
@@ -2938,8 +2869,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor)
auto dispatch__multinomial_alias_setup = [](const Tensor & probs) -> std::tuple<Tensor,Tensor> {
@@ -2974,8 +2905,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor
auto dispatch__nnpack_spatial_convolution = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride) -> Tensor {
@@ -2997,8 +2928,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
auto dispatch__pack_padded_sequence = [](const Tensor & input, const Tensor & lengths, bool batch_first) -> std::tuple<Tensor,Tensor> {
@@ -3020,8 +2951,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
auto dispatch__pad_packed_sequence = [](const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length) -> std::tuple<Tensor,Tensor> {
@@ -3043,8 +2974,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
auto dispatch__reshape_from_tensor = [](const Tensor & self, const Tensor & shape) -> Tensor {
@@ -3066,8 +2997,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_s_where(Tensor condition, Tensor self, Tensor other) -> Tensor
auto dispatch__s_where = [](const Tensor & condition, const Tensor & self, const Tensor & other) -> Tensor {
@@ -3089,8 +3020,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
auto dispatch__sample_dirichlet = [](const Tensor & self, Generator * generator) -> Tensor {
@@ -3112,8 +3043,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_shape_as_tensor(Tensor self) -> Tensor
auto dispatch__shape_as_tensor = [](const Tensor & self) -> Tensor {
@@ -3135,8 +3066,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
auto dispatch__sobol_engine_draw = [](const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype) -> std::tuple<Tensor,Tensor> {
@@ -3158,8 +3089,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
auto dispatch__sobol_engine_ff_ = [](Tensor self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated) -> Tensor {
@@ -3181,8 +3112,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
auto dispatch__sobol_engine_initialize_state_ = [](Tensor self, int64_t dimension) -> Tensor {
@@ -3204,8 +3135,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
auto dispatch__sobol_engine_scramble_ = [](Tensor self, const Tensor & ltm, int64_t dimension) -> Tensor {
@@ -3227,8 +3158,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
auto dispatch__softmax = [](const Tensor & self, int64_t dim, bool half_to_float) -> Tensor {
@@ -3250,8 +3181,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
auto dispatch__softmax_backward_data = [](const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) -> Tensor {
@@ -3273,8 +3204,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor
auto dispatch__sparse_addmm = [](const Tensor & self, const Tensor & sparse, const Tensor & dense, Scalar beta, Scalar alpha) -> Tensor {
@@ -3296,8 +3227,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
auto dispatch__sparse_mm = [](const Tensor & sparse, const Tensor & dense) -> Tensor {
@@ -3323,8 +3254,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -3374,8 +3305,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
auto dispatch__standard_gamma = [](const Tensor & self, Generator * generator) -> Tensor {
@@ -3397,8 +3328,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
auto dispatch__standard_gamma_grad = [](const Tensor & self, const Tensor & output) -> Tensor {
@@ -3420,8 +3351,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_std(Tensor self, bool unbiased=True) -> Tensor
auto dispatch__std = [](const Tensor & self, bool unbiased) -> Tensor {
@@ -3443,8 +3374,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
auto dispatch__trilinear = [](const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim) -> Tensor {
@@ -3466,8 +3397,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
auto dispatch__unique = [](const Tensor & self, bool sorted, bool return_inverse) -> std::tuple<Tensor,Tensor> {
@@ -3489,8 +3420,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
auto dispatch__unique2 = [](const Tensor & self, bool sorted, bool return_inverse, bool return_counts) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -3512,8 +3443,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
auto dispatch__use_cudnn_ctc_loss = [](const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank) -> bool {
@@ -3535,8 +3466,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_var(Tensor self, bool unbiased=True) -> Tensor
auto dispatch__var = [](const Tensor & self, bool unbiased) -> Tensor {
@@ -3558,8 +3489,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
auto dispatch__weight_norm = [](const Tensor & v, const Tensor & g, int64_t dim) -> Tensor {
@@ -3581,8 +3512,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::_weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
auto dispatch__weight_norm_cuda_interface = [](const Tensor & v, const Tensor & g, int64_t dim) -> std::tuple<Tensor,Tensor> {
@@ -3604,8 +3535,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::abs(Tensor self) -> Tensor
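
Operators with an out= overload branch on whether the out tensor was supplied: when _r.isNone(i) holds, the functional variant runs and allocates its result; otherwise the _out variant writes into the caller's tensor. A sketch of the two branches for abs, reconstructed under the same assumptions as the binding sketch above (wrap(), GIL release, and the out-first at::abs_out signature of this era are assumptions, not quoted from the diff):

// Sketch only: reconstructed out= branching for abs.
if (_r.isNone(1)) {
  // aten::abs(Tensor self) -> Tensor
  auto dispatch_abs = [](const Tensor & self) -> Tensor {
    pybind11::gil_scoped_release no_gil;
    return torch::abs(self);
  };
  return wrap(dispatch_abs(_r.tensor(0)));
} else {
  // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
    pybind11::gil_scoped_release no_gil;
    return at::abs_out(out, self);
  };
  return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
}
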
@@ -3636,8 +3567,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::abs_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_abs_ = [](Tensor self) -> Tensor {
@@ -3659,8 +3590,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::acos(Tensor self) -> Tensor
@@ -3691,8 +3622,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::acos_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_acos_ = [](Tensor self) -> Tensor {
@@ -3714,8 +3645,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
auto dispatch_adaptive_avg_pool1d = [](const Tensor & self, IntArrayRef output_size) -> Tensor {
@@ -3737,8 +3668,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
auto dispatch_adaptive_max_pool1d = [](const Tensor & self, IntArrayRef output_size) -> std::tuple<Tensor,Tensor> {
@@ -3762,8 +3693,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -3818,8 +3749,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -3890,8 +3821,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -3945,8 +3876,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4001,8 +3932,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4074,8 +4005,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4147,8 +4078,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4193,8 +4124,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4263,8 +4194,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
auto dispatch_affine_grid_generator = [](const Tensor & theta, IntArrayRef size, bool align_corners) -> Tensor {
@@ -4286,8 +4217,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::align_tensors(Tensor[] tensors) -> Tensor[]
auto dispatch_align_tensors = [](TensorList tensors) -> std::vector<Tensor> {
@@ -4312,8 +4243,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4373,8 +4304,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
auto dispatch_allclose = [](const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) -> bool {
@@ -4396,8 +4327,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
auto dispatch_alpha_dropout = [](const Tensor & input, double p, bool train) -> Tensor {
@@ -4419,8 +4350,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
auto dispatch_alpha_dropout_ = [](Tensor self, double p, bool train) -> Tensor {
@@ -4442,8 +4373,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::angle(Tensor self) -> Tensor
@@ -4477,8 +4408,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4538,8 +4469,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
auto dispatch_argmax = [](const Tensor & self, c10::optional<int64_t> dim, bool keepdim) -> Tensor {
@@ -4561,8 +4492,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
auto dispatch_argmin = [](const Tensor & self, c10::optional<int64_t> dim, bool keepdim) -> Tensor {
@@ -4586,8 +4517,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4621,8 +4552,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
auto dispatch_as_strided = [](const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) -> Tensor {
@@ -4644,8 +4575,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
auto dispatch_as_strided_ = [](Tensor self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) -> Tensor {
@@ -4667,8 +4598,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::asin(Tensor self) -> Tensor
@@ -4699,8 +4630,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::asin_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_asin_ = [](Tensor self) -> Tensor {
@@ -4722,8 +4653,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::atan(Tensor self) -> Tensor
@@ -4754,8 +4685,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::atan2(Tensor self, Tensor other) -> Tensor
@@ -4786,8 +4717,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::atan_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_atan_ = [](Tensor self) -> Tensor {
@@ -4809,8 +4740,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
auto dispatch_avg_pool1d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor {
@@ -4835,8 +4766,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4907,8 +4838,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -4956,8 +4887,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
auto dispatch_batch_norm = [](const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) -> Tensor {
@@ -4979,8 +4910,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor
auto dispatch_batch_norm_backward_elemt = [](const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu) -> Tensor {
@@ -5002,8 +4933,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
auto dispatch_batch_norm_backward_reduce = [](const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, bool input_g, bool weight_g, bool bias_g) -> std::tuple<Tensor,Tensor,Tensor,Tensor> {
@@ -5025,8 +4956,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(6)) {
// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
@@ -5057,8 +4988,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
auto dispatch_batch_norm_gather_stats = [](const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, int64_t count) -> std::tuple<Tensor,Tensor> {
@@ -5080,8 +5011,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int[] counts) -> (Tensor, Tensor)
auto dispatch_batch_norm_gather_stats_with_counts = [](const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, IntArrayRef counts) -> std::tuple<Tensor,Tensor> {
@@ -5103,8 +5034,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
auto dispatch_batch_norm_stats = [](const Tensor & input, double eps) -> std::tuple<Tensor,Tensor> {
@@ -5126,8 +5057,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
auto dispatch_batch_norm_update_stats = [](const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum) -> std::tuple<Tensor,Tensor> {
@@ -5151,8 +5082,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5195,8 +5126,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor
auto dispatch_bilinear = [](const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias) -> Tensor {
@@ -5218,8 +5149,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
auto dispatch_binary_cross_entropy_with_logits = [](const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) -> Tensor {
@@ -5241,8 +5172,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
auto dispatch_bincount = [](const Tensor & self, const Tensor & weights, int64_t minlength) -> Tensor {
@@ -5266,8 +5197,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5319,8 +5250,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::bitwise_not(Tensor self) -> Tensor
@@ -5353,8 +5284,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5408,8 +5339,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5463,8 +5394,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5512,8 +5443,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::bmm(Tensor self, Tensor mat2) -> Tensor
@@ -5544,8 +5475,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
auto dispatch_broadcast_tensors = [](TensorList tensors) -> std::vector<Tensor> {
@@ -5567,8 +5498,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::can_cast(ScalarType from, ScalarType to) -> bool
auto dispatch_can_cast = [](ScalarType from, ScalarType to) -> bool {
@@ -5590,8 +5521,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cartesian_prod(Tensor[] tensors) -> Tensor
auto dispatch_cartesian_prod = [](TensorList tensors) -> Tensor {
@@ -5615,8 +5546,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -5668,8 +5599,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
auto dispatch_cdist = [](const Tensor & x1, const Tensor & x2, double p, c10::optional<int64_t> compute_mode) -> Tensor {
@@ -5691,8 +5622,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::ceil(Tensor self) -> Tensor
@@ -5723,8 +5654,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_ceil_ = [](Tensor self) -> Tensor {
@@ -5746,8 +5677,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
auto dispatch_celu = [](const Tensor & self, Scalar alpha) -> Tensor {
@@ -5769,8 +5700,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
auto dispatch_celu_ = [](Tensor self, Scalar alpha) -> Tensor {
@@ -5792,8 +5723,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::chain_matmul(Tensor[] matrices) -> Tensor
auto dispatch_chain_matmul = [](TensorList matrices) -> Tensor {
@@ -5815,8 +5746,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::cholesky(Tensor self, bool upper=False) -> Tensor
@@ -5847,8 +5778,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
@@ -5879,8 +5810,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
@@ -5911,8 +5842,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]
auto dispatch_chunk = [](const Tensor & self, int64_t chunks, int64_t dim) -> std::vector<Tensor> {
@@ -5934,8 +5865,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
@@ -5966,8 +5897,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
auto dispatch_clamp_ = [](Tensor self, c10::optional<Scalar> min, c10::optional<Scalar> max) -> Tensor {
@@ -5989,8 +5920,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::clamp_max(Tensor self, Scalar max) -> Tensor
@@ -6021,8 +5952,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
auto dispatch_clamp_max_ = [](Tensor self, Scalar max) -> Tensor {
@@ -6044,8 +5975,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::clamp_min(Tensor self, Scalar min) -> Tensor
@@ -6076,8 +6007,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
auto dispatch_clamp_min_ = [](Tensor self, Scalar min) -> Tensor {
@@ -6099,8 +6030,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
auto dispatch_clone = [](const Tensor & self, c10::optional<MemoryFormat> memory_format) -> Tensor {
@@ -6122,8 +6053,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
auto dispatch_combinations = [](const Tensor & self, int64_t r, bool with_replacement) -> Tensor {
@@ -6145,8 +6076,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::conj(Tensor self) -> Tensor
@@ -6177,8 +6108,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
auto dispatch_constant_pad_nd = [](const Tensor & self, IntArrayRef pad, Scalar value) -> Tensor {
@@ -6200,8 +6131,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
auto dispatch_conv1d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) -> Tensor {
@@ -6223,8 +6154,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
auto dispatch_conv2d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) -> Tensor {
@@ -6246,8 +6177,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
auto dispatch_conv3d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) -> Tensor {
@@ -6269,8 +6200,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
auto dispatch_conv_tbc = [](const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) -> Tensor {
@@ -6292,8 +6223,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
auto dispatch_conv_transpose1d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) -> Tensor {
@@ -6315,8 +6246,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor
auto dispatch_conv_transpose2d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) -> Tensor {
@@ -6338,8 +6269,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
auto dispatch_conv_transpose3d = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) -> Tensor {
@@ -6361,8 +6292,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
auto dispatch_convolution = [](const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) -> Tensor {
@@ -6384,8 +6315,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::cos(Tensor self) -> Tensor
@@ -6416,8 +6347,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cos_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_cos_ = [](Tensor self) -> Tensor {
@@ -6439,8 +6370,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::cosh(Tensor self) -> Tensor
@@ -6471,8 +6402,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_cosh_ = [](Tensor self) -> Tensor {
@@ -6494,8 +6425,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
auto dispatch_cosine_embedding_loss = [](const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) -> Tensor {
@@ -6517,8 +6448,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
auto dispatch_cosine_similarity = [](const Tensor & x1, const Tensor & x2, int64_t dim, double eps) -> Tensor {
@@ -6540,8 +6471,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
@@ -6574,8 +6505,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6609,8 +6540,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
auto dispatch_cudnn_affine_grid_generator = [](const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) -> Tensor {
@@ -6632,8 +6563,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
auto dispatch_cudnn_batch_norm = [](const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) -> std::tuple<Tensor,Tensor,Tensor,Tensor> {
@@ -6657,8 +6588,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6694,8 +6625,8 @@
ParsedArgs<10> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6729,8 +6660,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
auto dispatch_cudnn_grid_sampler = [](const Tensor & self, const Tensor & grid) -> Tensor {
@@ -6752,8 +6683,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::cudnn_is_acceptable(Tensor self) -> bool
auto dispatch_cudnn_is_acceptable = [](const Tensor & self) -> bool {
@@ -6794,8 +6725,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6868,8 +6799,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6925,8 +6856,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -6980,8 +6911,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -7033,8 +6964,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::dequantize(Tensor self) -> Tensor
auto dispatch_dequantize = [](const Tensor & self) -> Tensor {
@@ -7056,8 +6987,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::det(Tensor self) -> Tensor
auto dispatch_det = [](const Tensor & self) -> Tensor {
@@ -7079,8 +7010,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::detach(Tensor self) -> Tensor
auto dispatch_detach = [](const Tensor & self) -> Tensor {
@@ -7102,8 +7033,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::detach_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_detach_ = [](Tensor self) -> Tensor {
@@ -7125,8 +7056,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::diag(Tensor self, int diagonal=0) -> Tensor
@@ -7157,8 +7088,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
auto dispatch_diag_embed = [](const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) -> Tensor {
@@ -7180,8 +7111,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::diagflat(Tensor self, int offset=0) -> Tensor
auto dispatch_diagflat = [](const Tensor & self, int64_t offset) -> Tensor {
@@ -7205,8 +7136,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -7240,8 +7171,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::digamma(Tensor self) -> Tensor
@@ -7272,8 +7203,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
auto dispatch_dist = [](const Tensor & self, const Tensor & other, Scalar p) -> Tensor {
@@ -7295,8 +7226,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
@@ -7327,8 +7258,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::dot(Tensor self, Tensor tensor) -> Tensor
@@ -7359,8 +7290,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::dropout(Tensor input, float p, bool train) -> Tensor
auto dispatch_dropout = [](const Tensor & input, double p, bool train) -> Tensor {
@@ -7382,8 +7313,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
auto dispatch_dropout_ = [](Tensor self, double p, bool train) -> Tensor {
@@ -7422,8 +7353,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors)
@@ -7455,8 +7386,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::einsum(str equation, Tensor[] tensors) -> Tensor
auto dispatch_einsum = [](std::string equation, TensorList tensors) -> Tensor {
@@ -7478,8 +7409,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
auto dispatch_embedding = [](const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor {
@@ -7501,8 +7432,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
auto dispatch_embedding_bag = [](const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights, bool include_last_offset) -> std::tuple<Tensor,Tensor,Tensor,Tensor> {
@@ -7524,8 +7455,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
auto dispatch_embedding_renorm_ = [](Tensor self, const Tensor & indices, double max_norm, double norm_type) -> Tensor {
@@ -7549,8 +7480,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -7614,8 +7545,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -7657,8 +7588,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
const auto options = TensorOptions()
@@ -7689,8 +7620,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -7742,8 +7673,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::equal(Tensor self, Tensor other) -> bool
auto dispatch_equal = [](const Tensor & self, const Tensor & other) -> bool {
@@ -7765,8 +7696,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::erf(Tensor self) -> Tensor
@@ -7797,8 +7728,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::erf_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_erf_ = [](Tensor self) -> Tensor {
@@ -7820,8 +7751,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::erfc(Tensor self) -> Tensor
@@ -7852,8 +7783,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_erfc_ = [](Tensor self) -> Tensor {
@@ -7875,8 +7806,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::erfinv(Tensor self) -> Tensor
@@ -7907,8 +7838,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::exp(Tensor self) -> Tensor
@@ -7939,8 +7870,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::exp_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_exp_ = [](Tensor self) -> Tensor {
@@ -7962,8 +7893,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::expm1(Tensor self) -> Tensor
@@ -7994,8 +7925,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_expm1_ = [](Tensor self) -> Tensor {
@@ -8019,8 +7950,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8092,8 +8023,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
auto dispatch_fake_quantize_per_channel_affine = [](const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) -> Tensor {
@@ -8115,8 +8046,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
auto dispatch_fake_quantize_per_tensor_affine = [](const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) -> Tensor {
@@ -8138,8 +8069,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
auto dispatch_fbgemm_linear_fp16_weight = [](const Tensor & input, const Tensor & packed_weight, const Tensor & bias) -> Tensor {
@@ -8161,8 +8092,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
auto dispatch_fbgemm_linear_fp16_weight_fp32_activation = [](const Tensor & input, const Tensor & packed_weight, const Tensor & bias) -> Tensor {
@@ -8184,8 +8115,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
auto dispatch_fbgemm_linear_int8_weight = [](const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) -> Tensor {
@@ -8207,8 +8138,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
auto dispatch_fbgemm_linear_int8_weight_fp32_activation = [](const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) -> Tensor {
@@ -8230,8 +8161,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
auto dispatch_fbgemm_linear_quantize_weight = [](const Tensor & input) -> std::tuple<Tensor,Tensor,double,int64_t> {
@@ -8253,8 +8184,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
auto dispatch_fbgemm_pack_gemm_matrix_fp16 = [](const Tensor & input) -> Tensor {
@@ -8278,8 +8209,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8313,8 +8244,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
auto dispatch_feature_alpha_dropout = [](const Tensor & input, double p, bool train) -> Tensor {
@@ -8336,8 +8267,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
auto dispatch_feature_alpha_dropout_ = [](Tensor self, double p, bool train) -> Tensor {
@@ -8359,8 +8290,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
auto dispatch_feature_dropout = [](const Tensor & input, double p, bool train) -> Tensor {
@@ -8382,8 +8313,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
auto dispatch_feature_dropout_ = [](Tensor self, double p, bool train) -> Tensor {
@@ -8405,8 +8336,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
auto dispatch_fft = [](const Tensor & self, int64_t signal_ndim, bool normalized) -> Tensor {
@@ -8430,8 +8361,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8469,8 +8400,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8520,8 +8451,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::flip(Tensor self, int[] dims) -> Tensor
auto dispatch_flip = [](const Tensor & self, IntArrayRef dims) -> Tensor {
@@ -8543,8 +8474,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::floor(Tensor self) -> Tensor
@@ -8575,8 +8506,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::floor_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_floor_ = [](Tensor self) -> Tensor {
@@ -8600,8 +8531,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8637,8 +8568,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8690,8 +8621,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::frac(Tensor self) -> Tensor
@@ -8722,8 +8653,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::frac_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_frac_ = [](Tensor self) -> Tensor {
@@ -8747,8 +8678,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8791,8 +8722,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
const auto options = TensorOptions()
@@ -8823,8 +8754,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8888,8 +8819,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8933,8 +8864,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -8988,8 +8919,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9058,8 +8989,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
@@ -9091,8 +9022,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::ger(Tensor self, Tensor vec2) -> Tensor
@@ -9123,8 +9054,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
auto dispatch_grid_sampler = [](const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) -> Tensor {
@@ -9146,8 +9077,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
auto dispatch_grid_sampler_2d = [](const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) -> Tensor {
@@ -9169,8 +9100,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
auto dispatch_grid_sampler_3d = [](const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) -> Tensor {
@@ -9192,8 +9123,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
auto dispatch_group_norm = [](const Tensor & input, int64_t num_groups, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enabled) -> Tensor {
@@ -9217,8 +9148,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9252,8 +9183,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
auto dispatch_gru_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) -> Tensor {
@@ -9277,8 +9208,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9334,8 +9265,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9415,8 +9346,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9464,8 +9395,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
auto dispatch_hardshrink = [](const Tensor & self, Scalar lambd) -> Tensor {
@@ -9487,8 +9418,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
auto dispatch_hinge_embedding_loss = [](const Tensor & self, const Tensor & target, double margin, int64_t reduction) -> Tensor {
@@ -9510,8 +9441,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(4)) {
// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
@@ -9542,8 +9473,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
@@ -9574,8 +9505,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
auto dispatch_ifft = [](const Tensor & self, int64_t signal_ndim, bool normalized) -> Tensor {
@@ -9597,8 +9528,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::imag(Tensor self) -> Tensor
@@ -9631,8 +9562,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9668,8 +9599,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9707,8 +9638,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9758,8 +9689,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
auto dispatch_index_put = [](const Tensor & self, TensorList indices, const Tensor & values, bool accumulate) -> Tensor {
@@ -9781,8 +9712,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
auto dispatch_index_put_ = [](Tensor self, TensorList indices, const Tensor & values, bool accumulate) -> Tensor {
@@ -9806,8 +9737,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -9859,8 +9790,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
auto dispatch_instance_norm = [](const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) -> Tensor {
@@ -9882,8 +9813,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::int_repr(Tensor self) -> Tensor
auto dispatch_int_repr = [](const Tensor & self) -> Tensor {
@@ -9905,8 +9836,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::inverse(Tensor self) -> Tensor
@@ -9937,8 +9868,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor
auto dispatch_irfft = [](const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) -> Tensor {
@@ -9960,8 +9891,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_complex(Tensor self) -> bool
auto dispatch_is_complex = [](const Tensor & self) -> bool {
@@ -9983,8 +9914,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_distributed(Tensor self) -> bool
auto dispatch_is_distributed = [](const Tensor & self) -> bool {
@@ -10006,8 +9937,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_floating_point(Tensor self) -> bool
auto dispatch_is_floating_point = [](const Tensor & self) -> bool {
@@ -10029,8 +9960,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_nonzero(Tensor self) -> bool
auto dispatch_is_nonzero = [](const Tensor & self) -> bool {
@@ -10052,8 +9983,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_same_size(Tensor self, Tensor other) -> bool
auto dispatch_is_same_size = [](const Tensor & self, const Tensor & other) -> bool {
@@ -10075,8 +10006,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::is_signed(Tensor self) -> bool
auto dispatch_is_signed = [](const Tensor & self) -> bool {
@@ -10098,8 +10029,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
auto dispatch_isclose = [](const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) -> Tensor {
@@ -10121,8 +10052,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::isfinite(Tensor self) -> Tensor
auto dispatch_isfinite = [](const Tensor & self) -> Tensor {
@@ -10144,8 +10075,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::isinf(Tensor self) -> Tensor
auto dispatch_isinf = [](const Tensor & self) -> Tensor {
@@ -10167,8 +10098,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::isnan(Tensor self) -> Tensor
auto dispatch_isnan = [](const Tensor & self) -> Tensor {
@@ -10190,8 +10121,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::kl_div(Tensor self, Tensor target, int reduction=Mean) -> Tensor
auto dispatch_kl_div = [](const Tensor & self, const Tensor & target, int64_t reduction) -> Tensor {
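
When every argument's __torch_function__ returns NotImplemented, handle_torch_function falls through to an error path, and the "torch" string added in these hunks appears to be what lets that error name the public function (e.g. 'torch.flip' rather than a bare 'flip'). A hedged sketch of that fallback; the Opaque class is hypothetical and the exact message wording may differ across versions:

    import torch

    class Opaque:
        # Declines to handle anything, forcing the fallback error path.
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return NotImplemented

    try:
        torch.flip(Opaque(), [0])
    except TypeError as err:
        print(err)  # e.g. "no implementation found for 'torch.flip' on types ..."
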
@@ -10232,8 +10163,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -10287,8 +10218,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
auto dispatch_layer_norm = [](const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) -> Tensor {
@@ -10312,8 +10243,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -10367,8 +10298,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -10420,8 +10351,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::lgamma(Tensor self) -> Tensor
@@ -10452,8 +10383,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::linspace(Scalar start, Scalar end, int steps=100, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -10494,8 +10425,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::log(Tensor self) -> Tensor
@@ -10526,8 +10457,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::log10(Tensor self) -> Tensor
@@ -10558,8 +10489,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::log10_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_log10_ = [](Tensor self) -> Tensor {
@@ -10581,8 +10512,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::log1p(Tensor self) -> Tensor
@@ -10613,8 +10544,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_log1p_ = [](Tensor self) -> Tensor {
@@ -10636,8 +10567,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::log2(Tensor self) -> Tensor
@@ -10668,8 +10599,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::log2_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_log2_ = [](Tensor self) -> Tensor {
@@ -10691,8 +10622,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::log_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_log_ = [](Tensor self) -> Tensor {
@@ -10716,8 +10647,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -10751,8 +10682,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::logdet(Tensor self) -> Tensor
auto dispatch_logdet = [](const Tensor & self) -> Tensor {
@@ -10774,8 +10705,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::logical_and(Tensor self, Tensor other) -> Tensor
@@ -10806,8 +10737,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::logical_not(Tensor self) -> Tensor
@@ -10838,8 +10769,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::logical_or(Tensor self, Tensor other) -> Tensor
@@ -10870,8 +10801,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::logical_xor(Tensor self, Tensor other) -> Tensor
@@ -10902,8 +10833,8 @@
ParsedArgs<10> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(4)) {
// aten::logspace(Scalar start, Scalar end, int steps=100, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -10946,8 +10877,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11001,8 +10932,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11036,8 +10967,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
auto dispatch_lstm_cell = [](const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) -> std::tuple<Tensor,Tensor> {
@@ -11076,8 +11007,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
@@ -11111,8 +11042,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11164,8 +11095,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
@@ -11196,8 +11127,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
auto dispatch_margin_ranking_loss = [](const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) -> Tensor {
@@ -11221,8 +11152,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11256,8 +11187,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
auto dispatch_masked_scatter = [](const Tensor & self, const Tensor & mask, const Tensor & source) -> Tensor {
@@ -11279,8 +11210,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::masked_select(Tensor self, Tensor mask) -> Tensor
@@ -11311,8 +11242,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::matmul(Tensor self, Tensor other) -> Tensor
@@ -11343,8 +11274,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::matrix_power(Tensor self, int n) -> Tensor
auto dispatch_matrix_power = [](const Tensor & self, int64_t n) -> Tensor {
@@ -11368,8 +11299,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11424,8 +11355,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11504,8 +11435,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
auto dispatch_max_pool1d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> Tensor {
@@ -11527,8 +11458,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
auto dispatch_max_pool1d_with_indices = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> std::tuple<Tensor,Tensor> {
@@ -11550,8 +11481,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
auto dispatch_max_pool2d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> Tensor {
@@ -11573,8 +11504,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
auto dispatch_max_pool3d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> Tensor {
@@ -11599,8 +11530,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11680,8 +11611,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11743,8 +11674,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::meshgrid(Tensor[] tensors) -> Tensor[]
auto dispatch_meshgrid = [](TensorList tensors) -> std::vector<Tensor> {
@@ -11787,8 +11718,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -11867,8 +11798,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
auto dispatch_miopen_batch_norm = [](const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -11890,8 +11821,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
auto dispatch_miopen_convolution = [](const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) -> Tensor {
@@ -11913,8 +11844,8 @@
ParsedArgs<10> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
auto dispatch_miopen_convolution_transpose = [](const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) -> Tensor {
@@ -11936,8 +11867,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
auto dispatch_miopen_depthwise_convolution = [](const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) -> Tensor {
@@ -11959,8 +11890,8 @@
ParsedArgs<14> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
auto dispatch_miopen_rnn = [](const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state) -> std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> {
@@ -11982,8 +11913,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
auto dispatch_mkldnn_adaptive_avg_pool2d = [](const Tensor & self, IntArrayRef output_size) -> Tensor {
@@ -12005,8 +11936,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
auto dispatch_mkldnn_convolution = [](const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) -> Tensor {
@@ -12028,8 +11959,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor)
auto dispatch_mkldnn_convolution_backward_weights = [](IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) -> std::tuple<Tensor,Tensor> {
@@ -12051,8 +11982,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
auto dispatch_mkldnn_max_pool2d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> Tensor {
@@ -12074,8 +12005,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::mm(Tensor self, Tensor mat2) -> Tensor
@@ -12125,8 +12056,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12180,8 +12111,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
@@ -12212,8 +12143,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(4)) {
// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
@@ -12244,8 +12175,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::mv(Tensor self, Tensor vec) -> Tensor
@@ -12276,8 +12207,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::mvlgamma(Tensor self, int p) -> Tensor
auto dispatch_mvlgamma = [](const Tensor & self, int64_t p) -> Tensor {
@@ -12299,8 +12230,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
auto dispatch_narrow = [](const Tensor & self, int64_t dim, int64_t start, int64_t length) -> Tensor {
@@ -12322,8 +12253,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(8)) {
// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
@@ -12355,8 +12286,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::native_layer_norm(Tensor input, Tensor? weight, Tensor? bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor)
auto dispatch_native_layer_norm = [](const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t M, int64_t N, double eps) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -12378,8 +12309,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
auto dispatch_native_norm = [](const Tensor & self, Scalar p) -> Tensor {
@@ -12403,8 +12334,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12456,8 +12387,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::neg(Tensor self) -> Tensor
@@ -12488,8 +12419,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::neg_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_neg_ = [](Tensor self) -> Tensor {
@@ -12517,8 +12448,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12620,8 +12551,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
auto dispatch_norm_except_dim = [](const Tensor & v, int64_t pow, int64_t dim) -> Tensor {
@@ -12647,8 +12578,8 @@
ParsedArgs<10> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12746,8 +12677,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12801,8 +12732,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12866,8 +12797,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -12909,8 +12840,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::orgqr(Tensor self, Tensor input2) -> Tensor
@@ -12941,8 +12872,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(5)) {
// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
@@ -12973,8 +12904,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
auto dispatch_pairwise_distance = [](const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim) -> Tensor {
@@ -12996,8 +12927,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::pdist(Tensor self, float p=2) -> Tensor
auto dispatch_pdist = [](const Tensor & self, double p) -> Tensor {
@@ -13019,8 +12950,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
auto dispatch_pinverse = [](const Tensor & self, double rcond) -> Tensor {
@@ -13042,8 +12973,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
auto dispatch_pixel_shuffle = [](const Tensor & self, int64_t upscale_factor) -> Tensor {
@@ -13065,8 +12996,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
auto dispatch_poisson = [](const Tensor & self, Generator * generator) -> Tensor {
@@ -13088,8 +13019,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
auto dispatch_poisson_nll_loss = [](const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction) -> Tensor {
@@ -13111,8 +13042,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::polygamma(int n, Tensor self) -> Tensor
@@ -13146,8 +13077,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13216,8 +13147,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::prelu(Tensor self, Tensor weight) -> Tensor
auto dispatch_prelu = [](const Tensor & self, const Tensor & weight) -> Tensor {
@@ -13242,8 +13173,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13303,8 +13234,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
auto dispatch_promote_types = [](ScalarType type1, ScalarType type2) -> ScalarType {
@@ -13326,8 +13257,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::q_per_channel_axis(Tensor self) -> int
auto dispatch_q_per_channel_axis = [](const Tensor & self) -> int64_t {
@@ -13349,8 +13280,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::q_per_channel_scales(Tensor self) -> Tensor
auto dispatch_q_per_channel_scales = [](const Tensor & self) -> Tensor {
@@ -13372,8 +13303,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::q_per_channel_zero_points(Tensor self) -> Tensor
auto dispatch_q_per_channel_zero_points = [](const Tensor & self) -> Tensor {
@@ -13395,8 +13326,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::q_scale(Tensor self) -> float
auto dispatch_q_scale = [](const Tensor & self) -> double {
@@ -13418,8 +13349,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::q_zero_point(Tensor self) -> int
auto dispatch_q_zero_point = [](const Tensor & self) -> int64_t {
@@ -13458,8 +13389,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
@@ -13491,8 +13422,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
auto dispatch_quantize_per_channel = [](const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype) -> Tensor {
@@ -13514,8 +13445,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
auto dispatch_quantize_per_tensor = [](const Tensor & self, double scale, int64_t zero_point, ScalarType dtype) -> Tensor {
@@ -13539,8 +13470,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13574,8 +13505,8 @@
ParsedArgs<14> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
auto dispatch_quantized_gru_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor {
@@ -13599,8 +13530,8 @@
ParsedArgs<11> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13634,8 +13565,8 @@
ParsedArgs<14> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
auto dispatch_quantized_lstm_cell = [](const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> std::tuple<Tensor,Tensor> {
@@ -13657,8 +13588,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
auto dispatch_quantized_max_pool2d = [](const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) -> Tensor {
@@ -13680,8 +13611,8 @@
ParsedArgs<14> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
auto dispatch_quantized_rnn_relu_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor {
@@ -13703,8 +13634,8 @@
ParsedArgs<14> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
auto dispatch_quantized_rnn_tanh_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor {
@@ -13730,8 +13661,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13839,8 +13770,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13886,8 +13817,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -13957,8 +13888,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14066,8 +13997,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14111,8 +14042,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14184,8 +14115,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::real(Tensor self) -> Tensor
@@ -14216,8 +14147,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::reciprocal(Tensor self) -> Tensor
@@ -14248,8 +14179,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_reciprocal_ = [](Tensor self) -> Tensor {
@@ -14271,8 +14202,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::relu(Tensor self) -> Tensor
auto dispatch_relu = [](const Tensor & self) -> Tensor {
@@ -14294,8 +14225,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::relu_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_relu_ = [](Tensor self) -> Tensor {
@@ -14319,8 +14250,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14372,8 +14303,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(4)) {
// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
@@ -14407,8 +14338,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14450,8 +14381,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::reshape(Tensor self, int[] shape) -> Tensor
auto dispatch_reshape = [](const Tensor & self, IntArrayRef shape) -> Tensor {
@@ -14473,8 +14404,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
auto dispatch_resize_as_ = [](Tensor self, const Tensor & the_template, c10::optional<MemoryFormat> memory_format) -> Tensor {
@@ -14500,8 +14431,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14551,8 +14482,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor
auto dispatch_rfft = [](const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) -> Tensor {
@@ -14576,8 +14507,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14611,8 +14542,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
auto dispatch_rnn_relu_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) -> Tensor {
@@ -14636,8 +14567,8 @@
ParsedArgs<9> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14671,8 +14602,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
auto dispatch_rnn_tanh_cell = [](const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) -> Tensor {
@@ -14694,8 +14625,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
auto dispatch_roll = [](const Tensor & self, IntArrayRef shifts, IntArrayRef dims) -> Tensor {
@@ -14717,8 +14648,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
auto dispatch_rot90 = [](const Tensor & self, int64_t k, IntArrayRef dims) -> Tensor {
@@ -14740,8 +14671,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::round(Tensor self) -> Tensor
@@ -14772,8 +14703,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::round_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_round_ = [](Tensor self) -> Tensor {
@@ -14795,8 +14726,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
auto dispatch_rrelu = [](const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor {
@@ -14818,8 +14749,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
auto dispatch_rrelu_ = [](Tensor self, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor {
@@ -14841,8 +14772,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::rsqrt(Tensor self) -> Tensor
@@ -14873,8 +14804,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_rsqrt_ = [](Tensor self) -> Tensor {
@@ -14898,8 +14829,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -14933,8 +14864,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
const auto options = TensorOptions()
@@ -14967,8 +14898,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15020,8 +14951,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15057,8 +14988,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15092,8 +15023,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::selu(Tensor self) -> Tensor
auto dispatch_selu = [](const Tensor & self) -> Tensor {
@@ -15115,8 +15046,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::selu_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_selu_ = [](Tensor self) -> Tensor {
@@ -15138,8 +15069,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::sigmoid(Tensor self) -> Tensor
@@ -15170,8 +15101,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_sigmoid_ = [](Tensor self) -> Tensor {
@@ -15193,8 +15124,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::sign(Tensor self) -> Tensor
@@ -15225,8 +15156,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::sin(Tensor self) -> Tensor
@@ -15257,8 +15188,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::sin_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_sin_ = [](Tensor self) -> Tensor {
@@ -15280,8 +15211,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::sinh(Tensor self) -> Tensor
@@ -15312,8 +15243,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_sinh_ = [](Tensor self) -> Tensor {
@@ -15344,8 +15275,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
auto dispatch_slogdet = [](const Tensor & self) -> std::tuple<Tensor,Tensor> {
@@ -15367,8 +15298,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::smm(Tensor self, Tensor mat2) -> Tensor
auto dispatch_smm = [](const Tensor & self, const Tensor & mat2) -> Tensor {
@@ -15392,8 +15323,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15444,8 +15375,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU)
@@ -15496,8 +15427,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15551,8 +15482,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[]
auto dispatch_split = [](const Tensor & self, int64_t split_size, int64_t dim) -> std::vector<Tensor> {
@@ -15574,8 +15505,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
auto dispatch_split_with_sizes = [](const Tensor & self, IntArrayRef split_sizes, int64_t dim) -> std::vector<Tensor> {
@@ -15597,8 +15528,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::sqrt(Tensor self) -> Tensor
@@ -15629,8 +15560,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_sqrt_ = [](Tensor self) -> Tensor {
@@ -15652,8 +15583,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::square(Tensor self) -> Tensor
auto dispatch_square = [](const Tensor & self) -> Tensor {
@@ -15675,8 +15606,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::square_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_square_ = [](Tensor self) -> Tensor {
@@ -15701,8 +15632,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15747,8 +15678,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15799,8 +15730,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
@@ -15834,8 +15765,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15898,8 +15829,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -15941,8 +15872,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor
auto dispatch_stft = [](const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const Tensor & window, bool normalized, bool onesided) -> Tensor {
@@ -15966,8 +15897,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16022,8 +15953,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16100,8 +16031,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
@@ -16150,8 +16081,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)
@@ -16183,8 +16114,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::t(Tensor(a) self) -> Tensor(a)
auto dispatch_t = [](const Tensor & self) -> Tensor {
@@ -16206,8 +16137,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::take(Tensor self, Tensor index) -> Tensor
@@ -16238,8 +16169,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::tan(Tensor self) -> Tensor
@@ -16270,8 +16201,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::tan_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_tan_ = [](Tensor self) -> Tensor {
@@ -16293,8 +16224,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::tanh(Tensor self) -> Tensor
@@ -16325,8 +16256,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_tanh_ = [](Tensor self) -> Tensor {
@@ -16348,8 +16279,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
auto dispatch_tensordot = [](const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other) -> Tensor {
@@ -16371,8 +16302,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(3)) {
// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
@@ -16403,8 +16334,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
auto dispatch_threshold_ = [](Tensor self, Scalar threshold, Scalar value) -> Tensor {
@@ -16443,8 +16374,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(5)) {
// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
@@ -16476,8 +16407,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::trace(Tensor self) -> Tensor
auto dispatch_trace = [](const Tensor & self) -> Tensor {
@@ -16501,8 +16432,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16538,8 +16469,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16590,8 +16521,8 @@
ParsedArgs<6> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(5)) {
// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
@@ -16623,8 +16554,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::tril(Tensor self, int diagonal=0) -> Tensor
@@ -16655,8 +16586,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
const auto options = TensorOptions()
@@ -16685,8 +16616,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
auto dispatch_triplet_margin_loss = [](const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) -> Tensor {
@@ -16708,8 +16639,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(2)) {
// aten::triu(Tensor self, int diagonal=0) -> Tensor
@@ -16740,8 +16671,8 @@
ParsedArgs<8> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
const auto options = TensorOptions()
@@ -16770,8 +16701,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (_r.isNone(1)) {
// aten::trunc(Tensor self) -> Tensor
@@ -16802,8 +16733,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_trunc_ = [](Tensor self) -> Tensor {
@@ -16827,8 +16758,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16862,8 +16793,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
auto dispatch_unique_consecutive = [](const Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -16885,8 +16816,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
auto dispatch_unique_dim = [](const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) -> std::tuple<Tensor,Tensor,Tensor> {
@@ -16908,8 +16839,8 @@
ParsedArgs<2> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
auto dispatch_unsqueeze = [](const Tensor & self, int64_t dim) -> Tensor {
@@ -16934,8 +16865,8 @@
ParsedArgs<5> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -16998,8 +16929,8 @@
ParsedArgs<4> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -17043,8 +16974,8 @@
ParsedArgs<3> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -17078,8 +17009,8 @@
ParsedArgs<1> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
// aten::zero_(Tensor(a!) self) -> Tensor(a!)
auto dispatch_zero_ = [](Tensor self) -> Tensor {
@@ -17103,8 +17034,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -17168,8 +17099,8 @@
ParsedArgs<7> parsed_args;
auto _r = parser.parse(args, kwargs, parsed_args);
- if (_r.has_torch_function()) {
- return handle_torch_function(_r, args, kwargs, THPVariableFunctions);
+ if(_r.has_torch_function()) {
+ return handle_torch_function(_r, args, kwargs, THPVariableFunctionsModule, "torch");
}
switch (_r.idx) {
case 0: {
@@ -17212,7 +17143,7 @@
auto r = parser.parse(args, kwargs, parsed_args);
if(r.has_torch_function()){
- return handle_torch_function(r, args, kwargs, THPVariableFunctions);
+ return handle_torch_function(r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (r.idx == 0) {
@@ -17242,7 +17173,7 @@
auto r = parser.parse(args, kwargs, parsed_args);
if(r.has_torch_function()){
- return handle_torch_function(r, args, kwargs, THPVariableFunctions);
+ return handle_torch_function(r, args, kwargs, THPVariableFunctionsModule, "torch");
}
if (r.idx == 0) {