This file has been truncated.
diff --git a/build/aten/src/ATen/CPUByteType.cpp b/build/aten/src/ATen/CPUByteType.cpp
index 266e2ba1e..c0f316787 100644
--- a/build/aten/src/ATen/CPUByteType.cpp
+++ b/build/aten/src/ATen/CPUByteType.cpp
@@ -2444,9 +2444,9 @@ std::tuple<Tensor,Tensor> CPUByteType::_weight_norm_cuda_interface(const Tensor
std::tuple<Tensor,Tensor> CPUByteType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const {
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUByteType");
}
-Tensor CPUByteType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CPUByteType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out);
}
Tensor CPUByteType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2611,9 +2611,9 @@ Tensor CPUByteType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cpu(/* actuals */ self, bins, min, max);
}
-Tensor & CPUByteType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CPUByteType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size);
}
Tensor CPUByteType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2655,9 +2655,9 @@ Tensor CPUByteType::fractional_max_pool3d_backward(const Tensor & grad_output, c
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CPUByteType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUByteType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUByteType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2671,9 +2671,9 @@ Tensor CPUByteType::reflection_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUByteType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUByteType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUByteType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2687,9 +2687,9 @@ Tensor CPUByteType::reflection_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUByteType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUByteType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUByteType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2703,9 +2703,9 @@ Tensor CPUByteType::replication_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUByteType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUByteType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUByteType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2719,9 +2719,9 @@ Tensor CPUByteType::replication_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUByteType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUByteType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUByteType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CPUByteType.h b/build/aten/src/ATen/CPUByteType.h
index 1fefddd23..2e18cd232 100644
--- a/build/aten/src/ATen/CPUByteType.h
+++ b/build/aten/src/ATen/CPUByteType.h
@@ -377,7 +377,7 @@ struct CPUByteType final : public CPUTypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -426,7 +426,7 @@ struct CPUByteType final : public CPUTypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -437,23 +437,23 @@ struct CPUByteType final : public CPUTypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CPUCharType.cpp b/build/aten/src/ATen/CPUCharType.cpp
index bd1004008..c4d3ec1c6 100644
--- a/build/aten/src/ATen/CPUCharType.cpp
+++ b/build/aten/src/ATen/CPUCharType.cpp
@@ -2444,9 +2444,9 @@ std::tuple<Tensor,Tensor> CPUCharType::_weight_norm_cuda_interface(const Tensor
std::tuple<Tensor,Tensor> CPUCharType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const {
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUCharType");
}
-Tensor CPUCharType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CPUCharType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out);
}
Tensor CPUCharType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2611,9 +2611,9 @@ Tensor CPUCharType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cpu(/* actuals */ self, bins, min, max);
}
-Tensor & CPUCharType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CPUCharType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size);
}
Tensor CPUCharType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2655,9 +2655,9 @@ Tensor CPUCharType::fractional_max_pool3d_backward(const Tensor & grad_output, c
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CPUCharType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUCharType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUCharType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2671,9 +2671,9 @@ Tensor CPUCharType::reflection_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUCharType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUCharType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUCharType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2687,9 +2687,9 @@ Tensor CPUCharType::reflection_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUCharType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUCharType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUCharType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2703,9 +2703,9 @@ Tensor CPUCharType::replication_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUCharType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUCharType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUCharType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2719,9 +2719,9 @@ Tensor CPUCharType::replication_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUCharType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUCharType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUCharType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CPUCharType.h b/build/aten/src/ATen/CPUCharType.h
index ce4133625..f347d5625 100644
--- a/build/aten/src/ATen/CPUCharType.h
+++ b/build/aten/src/ATen/CPUCharType.h
@@ -377,7 +377,7 @@ struct CPUCharType final : public CPUTypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -426,7 +426,7 @@ struct CPUCharType final : public CPUTypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -437,23 +437,23 @@ struct CPUCharType final : public CPUTypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CPUDoubleType.cpp b/build/aten/src/ATen/CPUDoubleType.cpp
index 2865317ab..5155fac56 100644
--- a/build/aten/src/ATen/CPUDoubleType.cpp
+++ b/build/aten/src/ATen/CPUDoubleType.cpp
@@ -5503,9 +5503,9 @@ std::tuple<Tensor,Tensor> CPUDoubleType::_weight_norm_cuda_interface(const Tenso
std::tuple<Tensor,Tensor> CPUDoubleType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const {
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUDoubleType");
}
-Tensor CPUDoubleType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CPUDoubleType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out);
}
Tensor CPUDoubleType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5670,9 +5670,9 @@ Tensor CPUDoubleType::histc(const Tensor & self, int64_t bins, Scalar min, Scala
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cpu(/* actuals */ self, bins, min, max);
}
-Tensor & CPUDoubleType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CPUDoubleType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size);
}
Tensor CPUDoubleType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5714,9 +5714,9 @@ Tensor CPUDoubleType::fractional_max_pool3d_backward(const Tensor & grad_output,
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CPUDoubleType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUDoubleType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUDoubleType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5730,9 +5730,9 @@ Tensor CPUDoubleType::reflection_pad1d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUDoubleType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUDoubleType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUDoubleType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5746,9 +5746,9 @@ Tensor CPUDoubleType::reflection_pad2d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUDoubleType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUDoubleType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUDoubleType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5762,9 +5762,9 @@ Tensor CPUDoubleType::replication_pad1d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUDoubleType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUDoubleType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUDoubleType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5778,9 +5778,9 @@ Tensor CPUDoubleType::replication_pad2d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUDoubleType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUDoubleType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUDoubleType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CPUDoubleType.h b/build/aten/src/ATen/CPUDoubleType.h
index b9641afd0..ce5da901f 100644
--- a/build/aten/src/ATen/CPUDoubleType.h
+++ b/build/aten/src/ATen/CPUDoubleType.h
@@ -637,7 +637,7 @@ struct CPUDoubleType final : public CPUTypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -686,7 +686,7 @@ struct CPUDoubleType final : public CPUTypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -697,23 +697,23 @@ struct CPUDoubleType final : public CPUTypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CPUFloatType.cpp b/build/aten/src/ATen/CPUFloatType.cpp
index bbe1c7797..d8779d5d3 100644
--- a/build/aten/src/ATen/CPUFloatType.cpp
+++ b/build/aten/src/ATen/CPUFloatType.cpp
@@ -5503,9 +5503,9 @@ std::tuple<Tensor,Tensor> CPUFloatType::_weight_norm_cuda_interface(const Tensor
std::tuple<Tensor,Tensor> CPUFloatType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const {
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUFloatType");
}
-Tensor CPUFloatType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CPUFloatType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out);
}
Tensor CPUFloatType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5670,9 +5670,9 @@ Tensor CPUFloatType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cpu(/* actuals */ self, bins, min, max);
}
-Tensor & CPUFloatType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CPUFloatType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size);
}
Tensor CPUFloatType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5714,9 +5714,9 @@ Tensor CPUFloatType::fractional_max_pool3d_backward(const Tensor & grad_output,
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CPUFloatType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUFloatType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUFloatType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5730,9 +5730,9 @@ Tensor CPUFloatType::reflection_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUFloatType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUFloatType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUFloatType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5746,9 +5746,9 @@ Tensor CPUFloatType::reflection_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUFloatType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUFloatType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUFloatType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5762,9 +5762,9 @@ Tensor CPUFloatType::replication_pad1d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUFloatType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUFloatType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUFloatType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5778,9 +5778,9 @@ Tensor CPUFloatType::replication_pad2d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUFloatType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUFloatType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUFloatType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CPUFloatType.h b/build/aten/src/ATen/CPUFloatType.h
index 6a0cff3eb..5d344de51 100644
--- a/build/aten/src/ATen/CPUFloatType.h
+++ b/build/aten/src/ATen/CPUFloatType.h
@@ -637,7 +637,7 @@ struct CPUFloatType final : public CPUTypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -686,7 +686,7 @@ struct CPUFloatType final : public CPUTypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -697,23 +697,23 @@ struct CPUFloatType final : public CPUTypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CPUIntType.cpp b/build/aten/src/ATen/CPUIntType.cpp
index e356caf1d..475d78107 100644
--- a/build/aten/src/ATen/CPUIntType.cpp
+++ b/build/aten/src/ATen/CPUIntType.cpp
@@ -2444,9 +2444,9 @@ std::tuple<Tensor,Tensor> CPUIntType::_weight_norm_cuda_interface(const Tensor &
std::tuple<Tensor,Tensor> CPUIntType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const {
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUIntType");
}
-Tensor CPUIntType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CPUIntType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out);
}
Tensor CPUIntType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2611,9 +2611,9 @@ Tensor CPUIntType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar m
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cpu(/* actuals */ self, bins, min, max);
}
-Tensor & CPUIntType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CPUIntType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size);
}
Tensor CPUIntType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2655,9 +2655,9 @@ Tensor CPUIntType::fractional_max_pool3d_backward(const Tensor & grad_output, co
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CPUIntType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUIntType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUIntType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2671,9 +2671,9 @@ Tensor CPUIntType::reflection_pad1d_backward(const Tensor & grad_output, const T
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUIntType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUIntType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUIntType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2687,9 +2687,9 @@ Tensor CPUIntType::reflection_pad2d_backward(const Tensor & grad_output, const T
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUIntType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUIntType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUIntType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2703,9 +2703,9 @@ Tensor CPUIntType::replication_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUIntType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUIntType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUIntType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -2719,9 +2719,9 @@ Tensor CPUIntType::replication_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding);
}
-Tensor & CPUIntType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CPUIntType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding);
}
Tensor CPUIntType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CPUIntType.h b/build/aten/src/ATen/CPUIntType.h
index 362750fdf..25dc72eb3 100644
--- a/build/aten/src/ATen/CPUIntType.h
+++ b/build/aten/src/ATen/CPUIntType.h
@@ -377,7 +377,7 @@ struct CPUIntType final : public CPUTypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -426,7 +426,7 @@ struct CPUIntType final : public CPUTypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -437,23 +437,23 @@ struct CPUIntType final : public CPUTypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
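// --- Illustrative note (C++ comments; not part of the generated patch) --- | |
// The hunks in this gist only rename the formal out-parameter `output` to | |
// `out`; the C++ API is positional, so existing call sites compile and | |
// behave identically. A minimal sketch against the signatures shown above, | |
// assuming a default float CPU tensor: | |
// | |
//   #include <ATen/ATen.h> | |
//   at::Tensor self = at::ones({1, 1, 4});             // (N, C, W) input | |
//   at::Tensor out  = at::empty({0}, self.options());  // resized by the kernel | |
//   at::replication_pad1d_out(out, self, {1, 1});      // binds to `out` either way | |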
diff --git a/build/aten/src/ATen/CPULongType.cpp b/build/aten/src/ATen/CPULongType.cpp | |
index 7a7d071a9..9b0d3e545 100644 | |
--- a/build/aten/src/ATen/CPULongType.cpp | |
+++ b/build/aten/src/ATen/CPULongType.cpp | |
@@ -2444,9 +2444,9 @@ std::tuple<Tensor,Tensor> CPULongType::_weight_norm_cuda_interface(const Tensor | |
std::tuple<Tensor,Tensor> CPULongType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const { | |
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPULongType"); | |
} | |
-Tensor CPULongType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CPULongType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out); | |
} | |
Tensor CPULongType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2611,9 +2611,9 @@ Tensor CPULongType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cpu(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CPULongType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CPULongType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size); | |
} | |
Tensor CPULongType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2655,9 +2655,9 @@ Tensor CPULongType::fractional_max_pool3d_backward(const Tensor & grad_output, c | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CPULongType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPULongType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPULongType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2671,9 +2671,9 @@ Tensor CPULongType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPULongType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPULongType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPULongType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2687,9 +2687,9 @@ Tensor CPULongType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPULongType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPULongType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPULongType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2703,9 +2703,9 @@ Tensor CPULongType::replication_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPULongType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPULongType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPULongType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2719,9 +2719,9 @@ Tensor CPULongType::replication_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPULongType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPULongType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPULongType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CPULongType.h b/build/aten/src/ATen/CPULongType.h | |
index f4bfa6ff7..2823b614d 100644 | |
--- a/build/aten/src/ATen/CPULongType.h | |
+++ b/build/aten/src/ATen/CPULongType.h | |
@@ -377,7 +377,7 @@ struct CPULongType final : public CPUTypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -426,7 +426,7 @@ struct CPULongType final : public CPUTypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -437,23 +437,23 @@ struct CPULongType final : public CPUTypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
diff --git a/build/aten/src/ATen/CPUShortType.cpp b/build/aten/src/ATen/CPUShortType.cpp | |
index d0e023c80..f007480dc 100644 | |
--- a/build/aten/src/ATen/CPUShortType.cpp | |
+++ b/build/aten/src/ATen/CPUShortType.cpp | |
@@ -2444,9 +2444,9 @@ std::tuple<Tensor,Tensor> CPUShortType::_weight_norm_cuda_interface(const Tensor | |
std::tuple<Tensor,Tensor> CPUShortType::_weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const { | |
AT_ERROR("_weight_norm_cuda_interface_backward not supported on CPUShortType"); | |
} | |
-Tensor CPUShortType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CPUShortType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cpu(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cpu(/* actuals */ self, out); | |
} | |
Tensor CPUShortType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2611,9 +2611,9 @@ Tensor CPUShortType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cpu(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CPUShortType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CPUShortType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cpu(/* actuals */ out, self, output_size); | |
} | |
Tensor CPUShortType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2655,9 +2655,9 @@ Tensor CPUShortType::fractional_max_pool3d_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cpu(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CPUShortType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPUShortType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPUShortType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2671,9 +2671,9 @@ Tensor CPUShortType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPUShortType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPUShortType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPUShortType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2687,9 +2687,9 @@ Tensor CPUShortType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPUShortType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPUShortType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPUShortType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2703,9 +2703,9 @@ Tensor CPUShortType::replication_pad1d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPUShortType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPUShortType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPUShortType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2719,9 +2719,9 @@ Tensor CPUShortType::replication_pad2d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cpu(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CPUShortType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CPUShortType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cpu(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cpu(/* actuals */ out, self, padding); | |
} | |
Tensor CPUShortType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CPUShortType.h b/build/aten/src/ATen/CPUShortType.h | |
index b8efa10c2..a9f342c40 100644 | |
--- a/build/aten/src/ATen/CPUShortType.h | |
+++ b/build/aten/src/ATen/CPUShortType.h | |
@@ -377,7 +377,7 @@ struct CPUShortType final : public CPUTypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -426,7 +426,7 @@ struct CPUShortType final : public CPUTypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -437,23 +437,23 @@ struct CPUShortType final : public CPUTypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
diff --git a/build/aten/src/ATen/CUDAByteType.cpp b/build/aten/src/ATen/CUDAByteType.cpp | |
index f2b9baf94..6e19ff46d 100644 | |
--- a/build/aten/src/ATen/CUDAByteType.cpp | |
+++ b/build/aten/src/ATen/CUDAByteType.cpp | |
@@ -2512,9 +2512,9 @@ std::tuple<Tensor,Tensor> CUDAByteType::_weight_norm_cuda_interface_backward(con | |
const OptionalDeviceGuard device_guard(device_of(grad_w)); | |
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim); | |
} | |
-Tensor CUDAByteType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CUDAByteType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out); | |
} | |
Tensor CUDAByteType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2683,9 +2683,9 @@ Tensor CUDAByteType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cuda(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CUDAByteType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CUDAByteType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size); | |
} | |
Tensor CUDAByteType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2727,9 +2727,9 @@ Tensor CUDAByteType::fractional_max_pool3d_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CUDAByteType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAByteType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAByteType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2743,9 +2743,9 @@ Tensor CUDAByteType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAByteType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAByteType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAByteType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2759,9 +2759,9 @@ Tensor CUDAByteType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAByteType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAByteType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAByteType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2775,9 +2775,9 @@ Tensor CUDAByteType::replication_pad1d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAByteType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAByteType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAByteType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2791,9 +2791,9 @@ Tensor CUDAByteType::replication_pad2d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAByteType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAByteType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAByteType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CUDAByteType.h b/build/aten/src/ATen/CUDAByteType.h | |
index 35907e471..e4a6c17b7 100644 | |
--- a/build/aten/src/ATen/CUDAByteType.h | |
+++ b/build/aten/src/ATen/CUDAByteType.h | |
@@ -383,7 +383,7 @@ struct CUDAByteType final : public CUDATypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -432,7 +432,7 @@ struct CUDAByteType final : public CUDATypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -443,23 +443,23 @@ struct CUDAByteType final : public CUDATypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
diff --git a/build/aten/src/ATen/CUDACharType.cpp b/build/aten/src/ATen/CUDACharType.cpp | |
index 8219b0ba6..e344fd743 100644 | |
--- a/build/aten/src/ATen/CUDACharType.cpp | |
+++ b/build/aten/src/ATen/CUDACharType.cpp | |
@@ -2512,9 +2512,9 @@ std::tuple<Tensor,Tensor> CUDACharType::_weight_norm_cuda_interface_backward(con | |
const OptionalDeviceGuard device_guard(device_of(grad_w)); | |
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim); | |
} | |
-Tensor CUDACharType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CUDACharType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out); | |
} | |
Tensor CUDACharType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2683,9 +2683,9 @@ Tensor CUDACharType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cuda(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CUDACharType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CUDACharType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size); | |
} | |
Tensor CUDACharType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2727,9 +2727,9 @@ Tensor CUDACharType::fractional_max_pool3d_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CUDACharType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDACharType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDACharType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2743,9 +2743,9 @@ Tensor CUDACharType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDACharType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDACharType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDACharType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2759,9 +2759,9 @@ Tensor CUDACharType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDACharType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDACharType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDACharType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2775,9 +2775,9 @@ Tensor CUDACharType::replication_pad1d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDACharType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDACharType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDACharType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2791,9 +2791,9 @@ Tensor CUDACharType::replication_pad2d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDACharType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDACharType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDACharType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CUDACharType.h b/build/aten/src/ATen/CUDACharType.h | |
index 25eda7e2a..ae20caba1 100644 | |
--- a/build/aten/src/ATen/CUDACharType.h | |
+++ b/build/aten/src/ATen/CUDACharType.h | |
@@ -383,7 +383,7 @@ struct CUDACharType final : public CUDATypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -432,7 +432,7 @@ struct CUDACharType final : public CUDATypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -443,23 +443,23 @@ struct CUDACharType final : public CUDATypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
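// --- Illustrative note (C++ comments; not part of the generated patch) --- | |
// One signature difference is visible in these hunks: the pad/pool `_out` | |
// variants take a mutable `Tensor & out` they write into, while | |
// `_standard_gamma_grad` takes `const Tensor &` -- a read-only input (the | |
// saved forward sample), even though the mechanical rename treats both the | |
// same. A sketch, assuming float CPU tensors: | |
// | |
//   at::Tensor alpha  = at::rand({3}) + 0.5;        // concentration params | |
//   at::Tensor sample = at::_standard_gamma(alpha); // forward draw | |
//   at::Tensor grad   = at::_standard_gamma_grad(alpha, sample); // d sample / d alpha | |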
diff --git a/build/aten/src/ATen/CUDADoubleType.cpp b/build/aten/src/ATen/CUDADoubleType.cpp
index d0765f2bc..d34f2bca3 100644
--- a/build/aten/src/ATen/CUDADoubleType.cpp
+++ b/build/aten/src/ATen/CUDADoubleType.cpp
@@ -5847,9 +5847,9 @@ std::tuple<Tensor,Tensor> CUDADoubleType::_weight_norm_cuda_interface_backward(c
const OptionalDeviceGuard device_guard(device_of(grad_w));
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim);
}
-Tensor CUDADoubleType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CUDADoubleType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out);
}
Tensor CUDADoubleType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6018,9 +6018,9 @@ Tensor CUDADoubleType::histc(const Tensor & self, int64_t bins, Scalar min, Scal
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cuda(/* actuals */ self, bins, min, max);
}
-Tensor & CUDADoubleType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CUDADoubleType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size);
}
Tensor CUDADoubleType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6062,9 +6062,9 @@ Tensor CUDADoubleType::fractional_max_pool3d_backward(const Tensor & grad_output
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CUDADoubleType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDADoubleType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDADoubleType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6078,9 +6078,9 @@ Tensor CUDADoubleType::reflection_pad1d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDADoubleType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDADoubleType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDADoubleType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6094,9 +6094,9 @@ Tensor CUDADoubleType::reflection_pad2d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDADoubleType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDADoubleType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDADoubleType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6110,9 +6110,9 @@ Tensor CUDADoubleType::replication_pad1d_backward(const Tensor & grad_output, co
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDADoubleType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDADoubleType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDADoubleType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6126,9 +6126,9 @@ Tensor CUDADoubleType::replication_pad2d_backward(const Tensor & grad_output, co
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDADoubleType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDADoubleType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDADoubleType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
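// ---- Editor's aside (annotation, not part of the generated diff) ----------
// Every hunk in CUDADoubleType.cpp, and in the sibling CUDA*Type.cpp files
// that follow, has the same generated-wrapper shape: pin the current device
// to `self`'s device, then forward the actuals to the native CUDA kernel.
// Quoting the pattern from the hunks above:
//
//   Tensor & CUDADoubleType::replication_pad3d_out(Tensor & out,
//       const Tensor & self, IntArrayRef padding) const {
//     const OptionalDeviceGuard device_guard(device_of(self));  // device pin
//     return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding);
//   }
//
// The guard and the callee are untouched; only the formal parameter and the
// forwarded actual change from `output` to `out`.
// ----------------------------------------------------------------------------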
diff --git a/build/aten/src/ATen/CUDADoubleType.h b/build/aten/src/ATen/CUDADoubleType.h
index 3b0428298..fca33491e 100644
--- a/build/aten/src/ATen/CUDADoubleType.h
+++ b/build/aten/src/ATen/CUDADoubleType.h
@@ -679,7 +679,7 @@ struct CUDADoubleType final : public CUDATypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -728,7 +728,7 @@ struct CUDADoubleType final : public CUDATypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -739,23 +739,23 @@ struct CUDADoubleType final : public CUDATypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CUDAFloatType.cpp b/build/aten/src/ATen/CUDAFloatType.cpp
index 56bca846b..bd2eb993a 100644
--- a/build/aten/src/ATen/CUDAFloatType.cpp
+++ b/build/aten/src/ATen/CUDAFloatType.cpp
@@ -5847,9 +5847,9 @@ std::tuple<Tensor,Tensor> CUDAFloatType::_weight_norm_cuda_interface_backward(co
const OptionalDeviceGuard device_guard(device_of(grad_w));
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim);
}
-Tensor CUDAFloatType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CUDAFloatType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out);
}
Tensor CUDAFloatType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6018,9 +6018,9 @@ Tensor CUDAFloatType::histc(const Tensor & self, int64_t bins, Scalar min, Scala
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cuda(/* actuals */ self, bins, min, max);
}
-Tensor & CUDAFloatType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CUDAFloatType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size);
}
Tensor CUDAFloatType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6062,9 +6062,9 @@ Tensor CUDAFloatType::fractional_max_pool3d_backward(const Tensor & grad_output,
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CUDAFloatType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAFloatType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAFloatType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6078,9 +6078,9 @@ Tensor CUDAFloatType::reflection_pad1d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAFloatType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAFloatType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAFloatType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6094,9 +6094,9 @@ Tensor CUDAFloatType::reflection_pad2d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAFloatType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAFloatType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAFloatType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6110,9 +6110,9 @@ Tensor CUDAFloatType::replication_pad1d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAFloatType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAFloatType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAFloatType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6126,9 +6126,9 @@ Tensor CUDAFloatType::replication_pad2d_backward(const Tensor & grad_output, con
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAFloatType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAFloatType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAFloatType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CUDAFloatType.h b/build/aten/src/ATen/CUDAFloatType.h
index b09919d14..aa07d7e51 100644
--- a/build/aten/src/ATen/CUDAFloatType.h
+++ b/build/aten/src/ATen/CUDAFloatType.h
@@ -679,7 +679,7 @@ struct CUDAFloatType final : public CUDATypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -728,7 +728,7 @@ struct CUDAFloatType final : public CUDATypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -739,23 +739,23 @@ struct CUDAFloatType final : public CUDATypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
diff --git a/build/aten/src/ATen/CUDAHalfType.cpp b/build/aten/src/ATen/CUDAHalfType.cpp
index 6470eed75..67844c35d 100644
--- a/build/aten/src/ATen/CUDAHalfType.cpp
+++ b/build/aten/src/ATen/CUDAHalfType.cpp
@@ -5636,9 +5636,9 @@ std::tuple<Tensor,Tensor> CUDAHalfType::_weight_norm_cuda_interface_backward(con
const OptionalDeviceGuard device_guard(device_of(grad_w));
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim);
}
-Tensor CUDAHalfType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor CUDAHalfType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output);
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out);
}
Tensor CUDAHalfType::_standard_gamma(const Tensor & self, Generator * generator) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5807,9 +5807,9 @@ Tensor CUDAHalfType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::_histc_cuda(/* actuals */ self, bins, min, max);
}
-Tensor & CUDAHalfType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
+Tensor & CUDAHalfType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size);
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size);
}
Tensor CUDAHalfType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5851,9 +5851,9 @@ Tensor CUDAHalfType::fractional_max_pool3d_backward(const Tensor & grad_output,
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices);
}
-Tensor & CUDAHalfType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAHalfType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAHalfType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5867,9 +5867,9 @@ Tensor CUDAHalfType::reflection_pad1d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAHalfType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAHalfType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAHalfType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5883,9 +5883,9 @@ Tensor CUDAHalfType::reflection_pad2d_backward(const Tensor & grad_output, const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAHalfType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAHalfType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAHalfType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5899,9 +5899,9 @@ Tensor CUDAHalfType::replication_pad1d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAHalfType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAHalfType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAHalfType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -5915,9 +5915,9 @@ Tensor CUDAHalfType::replication_pad2d_backward(const Tensor & grad_output, cons
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding);
}
-Tensor & CUDAHalfType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
+Tensor & CUDAHalfType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding);
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding);
}
Tensor CUDAHalfType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
diff --git a/build/aten/src/ATen/CUDAHalfType.h b/build/aten/src/ATen/CUDAHalfType.h
index 49f9ba241..62ea14337 100644
--- a/build/aten/src/ATen/CUDAHalfType.h
+++ b/build/aten/src/ATen/CUDAHalfType.h
@@ -661,7 +661,7 @@ struct CUDAHalfType final : public CUDATypeDefault {
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override;
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -710,7 +710,7 @@ struct CUDAHalfType final : public CUDATypeDefault {
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
@@ -721,23 +721,23 @@ struct CUDAHalfType final : public CUDATypeDefault {
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override;
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
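// ---- Editor's aside (annotation, not part of the generated diff) ----------
// For orientation, `out` in these `_out` signatures is the caller-provided
// destination tensor of the out-variant API. A small usage sketch against
// the ATen C++ frontend, assuming the free functions in Functions.h mirror
// these Type methods (CPU tensors for brevity; a (1, 1, 4) input padded by
// {1, 1} yields a (1, 1, 6) result):
#include <ATen/ATen.h>
void example_out_variant() {
  at::Tensor self = at::rand({1, 1, 4});
  at::Tensor out = at::empty({1, 1, 6});
  // Writes the padded result into `out` and returns it by reference.
  at::replication_pad1d_out(out, self, /*padding=*/{1, 1});
}
// ----------------------------------------------------------------------------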
diff --git a/build/aten/src/ATen/CUDAIntType.cpp b/build/aten/src/ATen/CUDAIntType.cpp | |
index c2edf7456..030234d7c 100644 | |
--- a/build/aten/src/ATen/CUDAIntType.cpp | |
+++ b/build/aten/src/ATen/CUDAIntType.cpp | |
@@ -2512,9 +2512,9 @@ std::tuple<Tensor,Tensor> CUDAIntType::_weight_norm_cuda_interface_backward(cons | |
const OptionalDeviceGuard device_guard(device_of(grad_w)); | |
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim); | |
} | |
-Tensor CUDAIntType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CUDAIntType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out); | |
} | |
Tensor CUDAIntType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2683,9 +2683,9 @@ Tensor CUDAIntType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cuda(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CUDAIntType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CUDAIntType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size); | |
} | |
Tensor CUDAIntType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2727,9 +2727,9 @@ Tensor CUDAIntType::fractional_max_pool3d_backward(const Tensor & grad_output, c | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CUDAIntType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAIntType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAIntType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2743,9 +2743,9 @@ Tensor CUDAIntType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAIntType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAIntType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAIntType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2759,9 +2759,9 @@ Tensor CUDAIntType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAIntType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAIntType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAIntType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2775,9 +2775,9 @@ Tensor CUDAIntType::replication_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAIntType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAIntType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAIntType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2791,9 +2791,9 @@ Tensor CUDAIntType::replication_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAIntType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAIntType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAIntType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CUDAIntType.h b/build/aten/src/ATen/CUDAIntType.h | |
index 44f96bd24..6808a7f90 100644 | |
--- a/build/aten/src/ATen/CUDAIntType.h | |
+++ b/build/aten/src/ATen/CUDAIntType.h | |
@@ -383,7 +383,7 @@ struct CUDAIntType final : public CUDATypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -432,7 +432,7 @@ struct CUDAIntType final : public CUDATypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -443,23 +443,23 @@ struct CUDAIntType final : public CUDATypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
diff --git a/build/aten/src/ATen/CUDALongType.cpp b/build/aten/src/ATen/CUDALongType.cpp | |
index 55ee9e8e7..008e40fef 100644 | |
--- a/build/aten/src/ATen/CUDALongType.cpp | |
+++ b/build/aten/src/ATen/CUDALongType.cpp | |
@@ -2512,9 +2512,9 @@ std::tuple<Tensor,Tensor> CUDALongType::_weight_norm_cuda_interface_backward(con | |
const OptionalDeviceGuard device_guard(device_of(grad_w)); | |
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim); | |
} | |
-Tensor CUDALongType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CUDALongType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out); | |
} | |
Tensor CUDALongType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2683,9 +2683,9 @@ Tensor CUDALongType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cuda(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CUDALongType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CUDALongType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size); | |
} | |
Tensor CUDALongType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2727,9 +2727,9 @@ Tensor CUDALongType::fractional_max_pool3d_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CUDALongType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDALongType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDALongType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2743,9 +2743,9 @@ Tensor CUDALongType::reflection_pad1d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDALongType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDALongType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDALongType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2759,9 +2759,9 @@ Tensor CUDALongType::reflection_pad2d_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDALongType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDALongType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDALongType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2775,9 +2775,9 @@ Tensor CUDALongType::replication_pad1d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDALongType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDALongType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDALongType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2791,9 +2791,9 @@ Tensor CUDALongType::replication_pad2d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDALongType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDALongType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDALongType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
diff --git a/build/aten/src/ATen/CUDALongType.h b/build/aten/src/ATen/CUDALongType.h | |
index 68f6affe7..6c9938c60 100644 | |
--- a/build/aten/src/ATen/CUDALongType.h | |
+++ b/build/aten/src/ATen/CUDALongType.h | |
@@ -383,7 +383,7 @@ struct CUDALongType final : public CUDATypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -432,7 +432,7 @@ struct CUDALongType final : public CUDATypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -443,23 +443,23 @@ struct CUDALongType final : public CUDATypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
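Note: the header hunks above only rename the formal out-parameter from `output` to `out`; arguments are passed positionally, so existing call sites compile unchanged. A minimal sketch of invoking one of these out-variants, assuming an ATen build from this tree (function name and argument order taken from the hunks; shapes are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::rand({1, 2, 8});   // N x C x W input
  at::Tensor out = at::empty({1, 2, 12});  // result buffer; the kernel resizes it if needed
  // Same positional call before and after the rename:
  at::reflection_pad1d_out(out, self, /*padding=*/{2, 2});
  return 0;
}

The out tensor comes first in this generation of the C++ API; only its declared name changes.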
diff --git a/build/aten/src/ATen/CUDAShortType.cpp b/build/aten/src/ATen/CUDAShortType.cpp | |
index 866a87580..f67d6def7 100644 | |
--- a/build/aten/src/ATen/CUDAShortType.cpp | |
+++ b/build/aten/src/ATen/CUDAShortType.cpp | |
@@ -2512,9 +2512,9 @@ std::tuple<Tensor,Tensor> CUDAShortType::_weight_norm_cuda_interface_backward(co | |
const OptionalDeviceGuard device_guard(device_of(grad_w)); | |
return at::native::weight_norm_cuda_backward(/* actuals */ grad_w, saved_v, saved_g, saved_norms, dim); | |
} | |
-Tensor CUDAShortType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor CUDAShortType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::_standard_gamma_grad_cuda(/* actuals */ self, output); | |
+ return at::native::_standard_gamma_grad_cuda(/* actuals */ self, out); | |
} | |
Tensor CUDAShortType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2683,9 +2683,9 @@ Tensor CUDAShortType::histc(const Tensor & self, int64_t bins, Scalar min, Scala | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::_histc_cuda(/* actuals */ self, bins, min, max); | |
} | |
-Tensor & CUDAShortType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & CUDAShortType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool2d_out_cuda(/* actuals */ out, self, output_size); | |
} | |
Tensor CUDAShortType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2727,9 +2727,9 @@ Tensor CUDAShortType::fractional_max_pool3d_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::fractional_max_pool3d_backward_cuda(/* actuals */ grad_output, self, kernel_size, output_size, indices); | |
} | |
-Tensor & CUDAShortType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAShortType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAShortType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2743,9 +2743,9 @@ Tensor CUDAShortType::reflection_pad1d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAShortType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAShortType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::reflection_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::reflection_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAShortType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2759,9 +2759,9 @@ Tensor CUDAShortType::reflection_pad2d_backward(const Tensor & grad_output, cons | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::reflection_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAShortType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAShortType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad1d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad1d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAShortType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2775,9 +2775,9 @@ Tensor CUDAShortType::replication_pad1d_backward(const Tensor & grad_output, con | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad1d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAShortType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAShortType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad2d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad2d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAShortType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -2791,9 +2791,9 @@ Tensor CUDAShortType::replication_pad2d_backward(const Tensor & grad_output, con | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::replication_pad2d_backward_cuda(/* actuals */ grad_output, self, padding); | |
} | |
-Tensor & CUDAShortType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & CUDAShortType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::replication_pad3d_out_cuda(/* actuals */ output, self, padding); | |
+ return at::native::replication_pad3d_out_cuda(/* actuals */ out, self, padding); | |
} | |
Tensor CUDAShortType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
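Note: every generated wrapper in CUDAShortType.cpp above follows the same two-line pattern: pin the current device to `self`'s device, then forward the actuals to the hand-written kernel under at::native. A hand-written mirror of that pattern, forwarding to the public entry point so the sketch compiles without ATen's internal headers (the generated code calls at::native::adaptive_avg_pool2d_out_cuda directly):

#include <ATen/ATen.h>
#include <ATen/DeviceGuard.h>

// Hypothetical stand-in for the generated CUDAShortType::adaptive_avg_pool2d_out.
at::Tensor & adaptive_avg_pool2d_out_mirror(at::Tensor & out, const at::Tensor & self,
                                            at::IntArrayRef output_size) {
  // Switch the current device to self's device for the duration of the call
  // (effectively a no-op for CPU tensors or when self is already current).
  const at::OptionalDeviceGuard device_guard(at::device_of(self));
  // Forward the actuals unchanged; the patch renames only the formal parameter.
  return at::adaptive_avg_pool2d_out(out, self, output_size);
}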
diff --git a/build/aten/src/ATen/CUDAShortType.h b/build/aten/src/ATen/CUDAShortType.h | |
index c75f926ab..2761e4880 100644 | |
--- a/build/aten/src/ATen/CUDAShortType.h | |
+++ b/build/aten/src/ATen/CUDAShortType.h | |
@@ -383,7 +383,7 @@ struct CUDAShortType final : public CUDATypeDefault { | |
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) const override; | |
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -432,7 +432,7 @@ struct CUDAShortType final : public CUDATypeDefault { | |
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) const override; | |
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
@@ -443,23 +443,23 @@ struct CUDAShortType final : public CUDATypeDefault { | |
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) const override; | |
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
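Note on the Declarations.yaml hunks that follow: wherever an out-variant's only change is renaming the allocated result argument from `output` to `out`, the entry's `matches_jit_signature` flag flips from false to true, since the JIT's canonical schema spells that argument `Tensor(a!) out`. A toy illustration of the string-level match, with the schema text copied from the adaptive_avg_pool2d_out hunk (the real generator's comparison is more structured than a plain string compare):

#include <cassert>
#include <string>

int main() {
  const std::string jit_form =
      "aten::adaptive_avg_pool2d(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)";
  const std::string before =
      "aten::adaptive_avg_pool2d(Tensor self, int[2] output_size, *, Tensor(a!) output) -> Tensor(a!)";
  const std::string after = jit_form;  // what this patch changes schema_string to
  assert(before != jit_form);  // matches_jit_signature: false
  assert(after == jit_form);   // matches_jit_signature: true
  return 0;
}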
diff --git a/build/aten/src/ATen/Declarations.yaml b/build/aten/src/ATen/Declarations.yaml | |
index ca39830a2..bcf95bcac 100644 | |
--- a/build/aten/src/ATen/Declarations.yaml | |
+++ b/build/aten/src/ATen/Declarations.yaml | |
@@ -40518,7 +40518,7 @@ | |
deprecated: false | |
- name: _standard_gamma_grad | |
matches_jit_signature: true | |
- schema_string: aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor | |
+ schema_string: aten::_standard_gamma_grad(Tensor self, Tensor out) -> Tensor | |
method_prefix_derived: '' | |
arguments: | |
- annotation: null | |
@@ -40529,7 +40529,7 @@ | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: output | |
+ name: out | |
type: const Tensor & | |
method_of: | |
- Type | |
@@ -53713,9 +53713,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: normal_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::normal(Tensor mean, float std=1, *, Generator? generator=None, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -53723,7 +53723,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -53751,7 +53751,7 @@ | |
python_module: '' | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -53801,9 +53801,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: normal_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::normal(float mean, Tensor std, *, Generator? generator=None, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -53811,7 +53811,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -53838,7 +53838,7 @@ | |
python_module: '' | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -53887,9 +53887,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: normal_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::normal(Tensor mean, Tensor std, *, Generator? generator=None, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -53897,7 +53897,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -53924,7 +53924,7 @@ | |
python_module: '' | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54000,9 +54000,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: _dirichlet_grad_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54010,7 +54010,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54035,7 +54035,7 @@ | |
python_module: '' | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54081,9 +54081,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: binary_cross_entropy_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, | |
- int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!) | |
+ int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54091,7 +54091,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54123,7 +54123,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54177,7 +54177,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: binary_cross_entropy_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, | |
Tensor target, Tensor weight, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -54279,9 +54279,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: mse_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::mse_loss(Tensor self, Tensor target, int reduction=Mean, *, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54289,7 +54289,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54315,7 +54315,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54363,7 +54363,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: mse_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, | |
int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -54455,9 +54455,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: l1_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::l1_loss(Tensor self, Tensor target, int reduction=Mean, *, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54465,7 +54465,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54491,7 +54491,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54539,7 +54539,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: l1_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, | |
int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -54631,9 +54631,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: multi_margin_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar | |
- margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!) | |
+ margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54641,7 +54641,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54685,7 +54685,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -54751,7 +54751,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: multi_margin_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, | |
Tensor target, Scalar p, Scalar margin, Tensor weight, int reduction, *, Tensor(a!) | |
grad_input) -> Tensor(a!) | |
@@ -54874,9 +54874,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: multilabel_margin_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -54884,7 +54884,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -54910,7 +54910,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -55056,7 +55056,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: multilabel_margin_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor | |
self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) | |
-> Tensor(a!) | |
@@ -55159,9 +55159,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: nll_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int | |
- reduction=Mean, int ignore_index=-100, *, Tensor(a!) output) -> Tensor(a!) | |
+ reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -55169,7 +55169,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -55207,7 +55207,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -55386,7 +55386,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: nll_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, | |
Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) | |
grad_input) -> Tensor(a!) | |
@@ -55509,9 +55509,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: nll_loss2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, | |
- int reduction=Mean, int ignore_index=-100, *, Tensor(a!) output) -> Tensor(a!) | |
+ int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -55519,7 +55519,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -55557,7 +55557,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -55736,7 +55736,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: nll_loss2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor | |
target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, | |
*, Tensor(a!) grad_input) -> Tensor(a!) | |
@@ -55860,9 +55860,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: smooth_l1_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -55870,7 +55870,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -55896,7 +55896,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -55944,7 +55944,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: smooth_l1_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor | |
target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56036,9 +56036,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: soft_margin_loss_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56046,7 +56046,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -56072,7 +56072,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -56120,7 +56120,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: soft_margin_loss_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, | |
Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56212,9 +56212,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: elu_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56222,7 +56222,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -56255,7 +56255,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -56310,9 +56310,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: elu_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, | |
- Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
+ Scalar input_scale, Tensor out, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56346,7 +56346,7 @@ | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: output | |
+ name: out | |
type: const Tensor & | |
method_of: | |
- Type | |
@@ -56367,7 +56367,7 @@ | |
- name: elu_backward | |
matches_jit_signature: true | |
schema_string: aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, | |
- Scalar input_scale, Tensor output) -> Tensor | |
+ Scalar input_scale, Tensor out) -> Tensor | |
method_prefix_derived: '' | |
arguments: | |
- annotation: null | |
@@ -56393,7 +56393,7 @@ | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: output | |
+ name: out | |
type: const Tensor & | |
method_of: | |
- Type | |
@@ -56457,8 +56457,8 @@ | |
with_gil: false | |
deprecated: false | |
- name: glu_out | |
- matches_jit_signature: false | |
- schema_string: aten::glu(Tensor self, int dim=-1, *, Tensor(a!) output) -> Tensor(a!) | |
+ matches_jit_signature: true | |
+ schema_string: aten::glu(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56466,7 +56466,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -56487,7 +56487,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -56529,7 +56529,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: glu_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::glu_backward(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) | |
grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56610,9 +56610,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: hardtanh_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56620,7 +56620,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -56647,7 +56647,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -56696,7 +56696,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: hardtanh_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, | |
Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56827,9 +56827,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: leaky_relu_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::leaky_relu(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -56837,7 +56837,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -56858,7 +56858,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -56900,7 +56900,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: leaky_relu_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar | |
negative_slope, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -57015,8 +57015,8 @@ | |
with_gil: false | |
deprecated: false | |
- name: log_sigmoid_out | |
- matches_jit_signature: false | |
- schema_string: aten::log_sigmoid(Tensor self, *, Tensor(a!) output) -> Tensor(a!) | |
+ matches_jit_signature: true | |
+ schema_string: aten::log_sigmoid(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -57024,7 +57024,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -57039,7 +57039,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -57153,7 +57153,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: log_sigmoid_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor | |
buffer, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -57238,7 +57238,7 @@ | |
matches_jit_signature: false | |
schema_string: aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, | |
Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -57246,7 +57246,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -57290,7 +57290,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -57357,7 +57357,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: rrelu_with_noise_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, | |
Tensor noise, Scalar lower, Scalar upper, bool training, *, Tensor(a!) grad_input) | |
-> Tensor(a!) | |
@@ -57527,9 +57527,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: softplus_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20, *, | |
- Tensor(a!) output) -> Tensor(a!) | |
+ Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -57537,7 +57537,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -57564,7 +57564,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -57613,9 +57613,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: softplus_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, | |
- Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
+ Scalar threshold, Tensor out, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -57649,7 +57649,7 @@ | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: output | |
+ name: out | |
type: const Tensor & | |
method_of: | |
- Type | |
@@ -57670,7 +57670,7 @@ | |
- name: softplus_backward | |
matches_jit_signature: true | |
schema_string: aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, | |
- Scalar threshold, Tensor output) -> Tensor | |
+ Scalar threshold, Tensor out) -> Tensor | |
method_prefix_derived: '' | |
arguments: | |
- annotation: null | |
@@ -57696,7 +57696,7 @@ | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: output | |
+ name: out | |
type: const Tensor & | |
method_of: | |
- Type | |
@@ -57715,8 +57715,8 @@ | |
with_gil: false | |
deprecated: false | |
- name: softshrink_out | |
- matches_jit_signature: false | |
- schema_string: aten::softshrink(Tensor self, Scalar lambd=0.5, *, Tensor(a!) output) | |
+ matches_jit_signature: true | |
+ schema_string: aten::softshrink(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) | |
-> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
@@ -57725,7 +57725,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -57746,7 +57746,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -57788,7 +57788,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: softshrink_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar | |
lambd, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -57870,9 +57870,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: adaptive_avg_pool2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool2d(Tensor self, int[2] output_size, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -57880,7 +57880,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -57901,7 +57901,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -58007,9 +58007,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: adaptive_avg_pool3d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d(Tensor self, int[3] output_size, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -58017,7 +58017,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -58038,7 +58038,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -58080,7 +58080,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: adaptive_avg_pool3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self, | |
*, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -58240,7 +58240,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: adaptive_max_pool2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, | |
Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -58410,7 +58410,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: adaptive_max_pool3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, | |
Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -58492,10 +58492,10 @@ | |
with_gil: false | |
deprecated: false | |
- name: avg_pool2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], | |
int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -58503,7 +58503,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -58550,7 +58550,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -58619,7 +58619,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: avg_pool2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] | |
kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, | |
*, Tensor(a!) grad_input) -> Tensor(a!) | |
@@ -58749,10 +58749,10 @@ | |
with_gil: false | |
deprecated: false | |
- name: avg_pool3d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], | |
int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -58760,7 +58760,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -58807,7 +58807,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -58876,7 +58876,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: avg_pool3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] | |
kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, | |
*, Tensor(a!) grad_input) -> Tensor(a!) | |
@@ -59117,7 +59117,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: fractional_max_pool2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, | |
int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) | |
-> Tensor(a!) | |
@@ -59335,7 +59335,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: fractional_max_pool3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, | |
int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) | |
-> Tensor(a!) | |
@@ -59586,7 +59586,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_pool2d_with_indices_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor | |
self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool | |
ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
@@ -59872,7 +59872,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_pool3d_with_indices_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor | |
self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool | |
ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
@@ -60014,9 +60014,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_unpool2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size, | |
- *, Tensor(a!) output) -> Tensor(a!) | |
+ *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60024,7 +60024,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60050,7 +60050,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60098,7 +60098,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_unpool2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor | |
indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -60192,9 +60192,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_unpool3d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, | |
- int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!) | |
+ int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60202,7 +60202,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60240,7 +60240,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60300,7 +60300,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: max_unpool3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor | |
indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) | |
-> Tensor(a!) | |
@@ -60419,9 +60419,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: reflection_pad1d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::reflection_pad1d(Tensor self, int[2] padding, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60429,7 +60429,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60450,7 +60450,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60492,7 +60492,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: reflection_pad1d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, | |
int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -60576,9 +60576,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: reflection_pad2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::reflection_pad2d(Tensor self, int[4] padding, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60586,7 +60586,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60607,7 +60607,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60649,7 +60649,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: reflection_pad2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, | |
int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -60733,9 +60733,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad1d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad1d(Tensor self, int[2] padding, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60743,7 +60743,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60764,7 +60764,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60806,7 +60806,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad1d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad1d_backward(Tensor grad_output, Tensor self, | |
int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -60890,9 +60890,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad2d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad2d(Tensor self, int[4] padding, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -60900,7 +60900,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -60921,7 +60921,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -60963,7 +60963,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad2d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad2d_backward(Tensor grad_output, Tensor self, | |
int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -61047,9 +61047,9 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad3d_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad3d(Tensor self, int[6] padding, *, Tensor(a!) | |
- output) -> Tensor(a!) | |
+ out) -> Tensor(a!) | |
method_prefix_derived: '' | |
arguments: | |
- allocate: true | |
@@ -61057,7 +61057,7 @@ | |
dynamic_type: Tensor | |
is_nullable: false | |
kwarg_only: false | |
- name: output | |
+ name: out | |
output: true | |
type: Tensor & | |
- annotation: null | |
@@ -61078,7 +61078,7 @@ | |
python_module: nn | |
returns: | |
- dynamic_type: Tensor | |
- name: output | |
+ name: out | |
type: Tensor & | |
inplace: false | |
is_factory_method: null | |
@@ -61120,7 +61120,7 @@ | |
with_gil: false | |
deprecated: false | |
- name: replication_pad3d_backward_out | |
- matches_jit_signature: false | |
+ matches_jit_signature: true | |
schema_string: aten::replication_pad3d_backward(Tensor grad_output, Tensor self, | |
int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: ''
@@ -61204,9 +61204,9 @@
with_gil: false
deprecated: false
- name: upsample_linear1d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners,
- *, Tensor(a!) output) -> Tensor(a!)
+ *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -61214,7 +61214,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -61240,7 +61240,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -61288,7 +61288,7 @@
with_gil: false
deprecated: false
- name: upsample_linear1d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_linear1d_backward(Tensor grad_output, int[1] output_size,
int[3] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -61384,9 +61384,9 @@
with_gil: false
deprecated: false
- name: upsample_bilinear2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners,
- *, Tensor(a!) output) -> Tensor(a!)
+ *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -61394,7 +61394,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -61420,7 +61420,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -61468,7 +61468,7 @@
with_gil: false
deprecated: false
- name: upsample_bilinear2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size,
int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -61564,9 +61564,9 @@
with_gil: false
deprecated: false
- name: upsample_bicubic2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners,
- *, Tensor(a!) output) -> Tensor(a!)
+ *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -61574,7 +61574,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -61600,7 +61600,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -61648,7 +61648,7 @@
with_gil: false
deprecated: false
- name: upsample_bicubic2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size,
int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -61744,9 +61744,9 @@
with_gil: false
deprecated: false
- name: upsample_trilinear3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool
- align_corners, *, Tensor(a!) output) -> Tensor(a!)
+ align_corners, *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -61754,7 +61754,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -61780,7 +61780,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -61828,7 +61828,7 @@
with_gil: false
deprecated: false
- name: upsample_trilinear3d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size,
int[5] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -61924,9 +61924,9 @@
with_gil: false
deprecated: false
- name: upsample_nearest1d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest1d(Tensor self, int[1] output_size, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -61934,7 +61934,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -61955,7 +61955,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -61997,7 +61997,7 @@
with_gil: false
deprecated: false
- name: upsample_nearest1d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest1d_backward(Tensor grad_output, int[1] output_size,
int[3] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -62083,9 +62083,9 @@
with_gil: false
deprecated: false
- name: upsample_nearest2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest2d(Tensor self, int[2] output_size, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62093,7 +62093,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -62114,7 +62114,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -62156,7 +62156,7 @@
with_gil: false
deprecated: false
- name: upsample_nearest2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest2d_backward(Tensor grad_output, int[2] output_size,
int[4] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -62242,9 +62242,9 @@
with_gil: false
deprecated: false
- name: upsample_nearest3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest3d(Tensor self, int[3] output_size, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62252,7 +62252,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -62273,7 +62273,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -62315,7 +62315,7 @@
with_gil: false
deprecated: false
- name: upsample_nearest3d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::upsample_nearest3d_backward(Tensor grad_output, int[3] output_size,
int[5] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
method_prefix_derived: ''
@@ -62401,8 +62401,8 @@
with_gil: false
deprecated: false
- name: sigmoid_backward_out
- matches_jit_signature: false
- schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor output, *, Tensor(a!)
+ matches_jit_signature: true
+ schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor out, *, Tensor(a!)
grad_input) -> Tensor(a!)
method_prefix_derived: ''
arguments:
@@ -62422,7 +62422,7 @@
- annotation: null
dynamic_type: Tensor
is_nullable: false
- name: output
+ name: out
type: const Tensor &
method_of:
- Type
@@ -62442,7 +62442,7 @@
deprecated: false
- name: sigmoid_backward
matches_jit_signature: true
- schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
+ schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor out) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -62453,7 +62453,7 @@
- annotation: null
dynamic_type: Tensor
is_nullable: false
- name: output
+ name: out
type: const Tensor &
method_of:
- Type
@@ -62472,8 +62472,8 @@
with_gil: false
deprecated: false
- name: tanh_backward_out
- matches_jit_signature: false
- schema_string: aten::tanh_backward(Tensor grad_output, Tensor output, *, Tensor(a!)
+ matches_jit_signature: true
+ schema_string: aten::tanh_backward(Tensor grad_output, Tensor out, *, Tensor(a!)
grad_input) -> Tensor(a!)
method_prefix_derived: ''
arguments:
@@ -62493,7 +62493,7 @@
- annotation: null
dynamic_type: Tensor
is_nullable: false
- name: output
+ name: out
type: const Tensor &
method_of:
- Type
@@ -62513,7 +62513,7 @@
deprecated: false
- name: tanh_backward
matches_jit_signature: true
- schema_string: aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
+ schema_string: aten::tanh_backward(Tensor grad_output, Tensor out) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -62524,7 +62524,7 @@
- annotation: null
dynamic_type: Tensor
is_nullable: false
- name: output
+ name: out
type: const Tensor &
method_of:
- Type
@@ -62543,10 +62543,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size,
Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0,
- int[2] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+ int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62554,7 +62554,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -62614,7 +62614,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -63075,10 +63075,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size,
Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0,
- int[3] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+ int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63086,7 +63086,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -63146,7 +63146,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -63607,10 +63607,9 @@
with_gil: false
deprecated: false
- name: thnn_conv2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size,
- Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) output) ->
- Tensor(a!)
+ Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63618,7 +63617,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -63664,7 +63663,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -64061,10 +64060,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_depthwise2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size,
Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64072,7 +64071,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -64125,7 +64124,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -64200,10 +64199,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_depthwise2d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2]
kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *,
- Tensor(a!) output) -> Tensor(a!)
+ Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64211,7 +64210,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -64260,7 +64259,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -64485,10 +64484,9 @@
with_gil: false
deprecated: false
- name: thnn_conv3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size,
- Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) output) ->
- Tensor(a!)
+ Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64496,7 +64494,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -64542,7 +64540,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -64939,10 +64937,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size,
Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64950,7 +64948,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -65003,7 +65001,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
@@ -65432,10 +65430,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size,
Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!)
- output) -> Tensor(a!)
+ out) -> Tensor(a!)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -65443,7 +65441,7 @@
dynamic_type: Tensor
is_nullable: false
kwarg_only: false
- name: output
+ name: out
output: true
type: Tensor &
- annotation: null
@@ -65496,7 +65494,7 @@
python_module: nn
returns:
- dynamic_type: Tensor
- name: output
+ name: out
type: Tensor &
inplace: false
is_factory_method: null
diff --git a/build/aten/src/ATen/Functions.h b/build/aten/src/ATen/Functions.h
index 6b034c69e..6d731d639 100644
--- a/build/aten/src/ATen/Functions.h
+++ b/build/aten/src/ATen/Functions.h
@@ -1056,7 +1056,7 @@ static inline Tensor zeros(IntArrayRef size, const TensorOptions & options={});
static inline Tensor & zeros_out(Tensor & out, IntArrayRef size);
static inline Tensor zeros_like(const Tensor & self);
static inline Tensor zeros_like(const Tensor & self, const TensorOptions & options);
-static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output);
+static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out);
static inline Tensor _standard_gamma(const Tensor & self, Generator * generator=nullptr);
static inline Tensor poisson(const Tensor & self, Generator * generator=nullptr);
static inline Tensor native_norm(const Tensor & self, Scalar p=2);
@@ -1294,100 +1294,100 @@ static inline Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor &
static inline Tensor pow(const Tensor & self, const Tensor & exponent);
static inline Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent);
static inline Tensor pow(Scalar self, const Tensor & exponent);
-static inline Tensor & normal_out(Tensor & output, const Tensor & mean, double std=1, Generator * generator=nullptr);
+static inline Tensor & normal_out(Tensor & out, const Tensor & mean, double std=1, Generator * generator=nullptr);
static inline Tensor normal(const Tensor & mean, double std=1, Generator * generator=nullptr);
-static inline Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator=nullptr);
+static inline Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator=nullptr);
static inline Tensor normal(double mean, const Tensor & std, Generator * generator=nullptr);
-static inline Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
+static inline Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
static inline Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
static inline Tensor alias(const Tensor & self);
-static inline Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total);
+static inline Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total);
static inline Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total);
-static inline Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
+static inline Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
static inline Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
-static inline Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+static inline Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
static inline Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-static inline Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+static inline Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
static inline Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-static inline Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
+static inline Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
static inline Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
-static inline Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+static inline Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction);
static inline std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction);
static inline Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
static inline Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
-static inline Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
+static inline Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
static inline Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
static inline std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
static inline std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
static inline Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
static inline Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
-static inline Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
+static inline Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
static inline Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
static inline std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
static inline std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
static inline Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
static inline Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
-static inline Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+static inline Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
static inline Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-static inline Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+static inline Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
static inline Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
static inline Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-static inline Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
+static inline Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
static inline Tensor elu(const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
-static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
-static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
+static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out);
+static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out);
static inline Tensor & elu_(Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
-static inline Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim=-1);
+static inline Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim=-1);
static inline Tensor glu(const Tensor & self, int64_t dim=-1);
static inline Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim);
static inline Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim);
-static inline Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
+static inline Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
static inline Tensor hardtanh(const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
static inline Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
static inline Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
static inline Tensor & hardtanh_(Tensor & self, Scalar min_val=-1, Scalar max_val=1);
-static inline Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope=0.01);
+static inline Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope=0.01);
static inline Tensor leaky_relu(const Tensor & self, Scalar negative_slope=0.01);
static inline Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
static inline Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
static inline Tensor & leaky_relu_(Tensor & self, Scalar negative_slope=0.01);
-static inline Tensor & log_sigmoid_out(Tensor & output, const Tensor & self);
+static inline Tensor & log_sigmoid_out(Tensor & out, const Tensor & self);
static inline Tensor log_sigmoid(const Tensor & self);
static inline std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self);
static inline std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self);
static inline Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
static inline Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
-static inline Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
+static inline Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
static inline Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
static inline Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
static inline Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
static inline Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
-static inline Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta=1, Scalar threshold=20);
+static inline Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta=1, Scalar threshold=20);
static inline Tensor softplus(const Tensor & self, Scalar beta=1, Scalar threshold=20);
-static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
-static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
-static inline Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd=0.5);
+static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out);
+static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out);
+static inline Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd=0.5);
static inline Tensor softshrink(const Tensor & self, Scalar lambd=0.5);
static inline Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd);
static inline Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd);
-static inline Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+static inline Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
static inline Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
static inline Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
static inline Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self);
-static inline Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+static inline Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
static inline Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size);
static inline Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self);
static inline Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self);
@@ -1399,11 +1399,11 @@ static inline std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & out
static inline std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size);
static inline Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices);
static inline Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices);
-static inline Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
+static inline Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
static inline Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
-static inline Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
+static inline Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
static inline Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
@@ -1423,103 +1423,103 @@ static inline std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor &
static inline std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false);
static inline Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
static inline Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
-static inline Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
+static inline Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
static inline Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size);
static inline Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
static inline Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
-static inline Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
+static inline Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
static inline Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
static inline Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
static inline Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
-static inline Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding);
+static inline Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
static inline Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding);
static inline Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
static inline Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-static inline Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding);
+static inline Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
static inline Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding);
static inline Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
static inline Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-static inline Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding);
+static inline Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad1d(const Tensor & self, IntArrayRef padding);
static inline Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-static inline Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding);
+static inline Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad2d(const Tensor & self, IntArrayRef padding);
static inline Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-static inline Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding);
+static inline Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad3d(const Tensor & self, IntArrayRef padding);
static inline Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
static inline Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-static inline Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+static inline Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
static inline Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-static inline Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+static inline Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
static inline Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-static inline Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+static inline Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
static inline Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-static inline Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+static inline Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners);
static inline Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
static inline Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-static inline Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+static inline Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
static inline Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size);
static inline Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
static inline Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-static inline Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+static inline Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
static inline Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size);
static inline Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
static inline Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-static inline Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+static inline Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
static inline Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size);
static inline Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
static inline Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
-static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output);
-static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
-static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & output);
-static inline Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
+static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out);
+static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out);
+static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out);
+static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & out);
+static inline Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
static inline Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask);
-static inline Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
+static inline Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
static inline Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-static inline Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
+static inline Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
static inline Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-static inline Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
+static inline Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
static inline Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
-static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
+static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask);
-static inline Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
+static inline Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
static inline Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-static inline Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
+static inline Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
static inline Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask);
-static inline Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
+static inline Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
static inline Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
@@ -4654,8 +4654,8 @@ static inline Tensor zeros_like(const Tensor & self) {
static inline Tensor zeros_like(const Tensor & self, const TensorOptions & options) {
return at::getType(options).zeros_like(self, options);
}
-static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) {
- return detail::infer_type(self)._standard_gamma_grad(self, output);
+static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) {
+ return detail::infer_type(self)._standard_gamma_grad(self, out);
}
static inline Tensor _standard_gamma(const Tensor & self, Generator * generator) {
return detail::infer_type(self)._standard_gamma(self, generator);
@@ -5368,20 +5368,20 @@ static inline Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponen
static inline Tensor pow(Scalar self, const Tensor & exponent) {
return detail::infer_type(exponent).pow(self, exponent);
}
-static inline Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) {
- return detail::infer_type(output).normal_out(output, mean, std, generator);
+static inline Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) {
+ return detail::infer_type(out).normal_out(out, mean, std, generator);
}
static inline Tensor normal(const Tensor & mean, double std, Generator * generator) {
return detail::infer_type(mean).normal(mean, std, generator);
}
-static inline Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) {
- return detail::infer_type(output).normal_out(output, mean, std, generator);
+static inline Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) {
+ return detail::infer_type(out).normal_out(out, mean, std, generator);
}
static inline Tensor normal(double mean, const Tensor & std, Generator * generator) {
return detail::infer_type(std).normal(mean, std, generator); | |
} | |
-static inline Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) { | |
- return detail::infer_type(output).normal_out(output, mean, std, generator); | |
+static inline Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) { | |
+ return detail::infer_type(out).normal_out(out, mean, std, generator); | |
} | |
static inline Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) { | |
return detail::infer_type(mean).normal(mean, std, generator); | |
@@ -5389,14 +5389,14 @@ static inline Tensor normal(const Tensor & mean, const Tensor & std, Generator * | |
static inline Tensor alias(const Tensor & self) { | |
return detail::infer_type(self).alias(self); | |
} | |
-static inline Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
- return detail::infer_type(output)._dirichlet_grad_out(output, x, alpha, total); | |
+static inline Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
+ return detail::infer_type(out)._dirichlet_grad_out(out, x, alpha, total); | |
} | |
static inline Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) { | |
return detail::infer_type(x)._dirichlet_grad(x, alpha, total); | |
} | |
-static inline Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
- return detail::infer_type(self).binary_cross_entropy_out(output, self, target, weight, reduction); | |
+static inline Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
+ return detail::infer_type(self).binary_cross_entropy_out(out, self, target, weight, reduction); | |
} | |
static inline Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
return detail::infer_type(self).binary_cross_entropy(self, target, weight, reduction); | |
@@ -5407,8 +5407,8 @@ static inline Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, co | |
static inline Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { | |
return detail::infer_type(self).binary_cross_entropy_backward(grad_output, self, target, weight, reduction); | |
} | |
-static inline Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
- return detail::infer_type(self).mse_loss_out(output, self, target, reduction); | |
+static inline Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { | |
+ return detail::infer_type(self).mse_loss_out(out, self, target, reduction); | |
} | |
static inline Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).mse_loss(self, target, reduction); | |
@@ -5419,8 +5419,8 @@ static inline Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & | |
static inline Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).mse_loss_backward(grad_output, self, target, reduction); | |
} | |
-static inline Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
- return detail::infer_type(self).l1_loss_out(output, self, target, reduction); | |
+static inline Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { | |
+ return detail::infer_type(self).l1_loss_out(out, self, target, reduction); | |
} | |
static inline Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).l1_loss(self, target, reduction); | |
@@ -5431,8 +5431,8 @@ static inline Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & | |
static inline Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).l1_loss_backward(grad_output, self, target, reduction); | |
} | |
-static inline Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
- return detail::infer_type(self).multi_margin_loss_out(output, self, target, p, margin, weight, reduction); | |
+static inline Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
+ return detail::infer_type(self).multi_margin_loss_out(out, self, target, p, margin, weight, reduction); | |
} | |
static inline Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
return detail::infer_type(self).multi_margin_loss(self, target, p, margin, weight, reduction); | |
@@ -5443,8 +5443,8 @@ static inline Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const | |
static inline Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { | |
return detail::infer_type(self).multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction); | |
} | |
-static inline Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
- return detail::infer_type(self).multilabel_margin_loss_out(output, self, target, reduction); | |
+static inline Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { | |
+ return detail::infer_type(self).multilabel_margin_loss_out(out, self, target, reduction); | |
} | |
static inline Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).multilabel_margin_loss(self, target, reduction); | |
@@ -5461,8 +5461,8 @@ static inline Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, | |
static inline Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { | |
return detail::infer_type(self).multilabel_margin_loss_backward(grad_output, self, target, reduction, is_target); | |
} | |
-static inline Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
- return detail::infer_type(self).nll_loss_out(output, self, target, weight, reduction, ignore_index); | |
+static inline Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
+ return detail::infer_type(self).nll_loss_out(out, self, target, weight, reduction, ignore_index); | |
} | |
static inline Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
return detail::infer_type(self).nll_loss(self, target, weight, reduction, ignore_index); | |
@@ -5479,8 +5479,8 @@ static inline Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & | |
static inline Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
return detail::infer_type(self).nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-static inline Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
- return detail::infer_type(self).nll_loss2d_out(output, self, target, weight, reduction, ignore_index); | |
+static inline Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
+ return detail::infer_type(self).nll_loss2d_out(out, self, target, weight, reduction, ignore_index); | |
} | |
static inline Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { | |
return detail::infer_type(self).nll_loss2d(self, target, weight, reduction, ignore_index); | |
@@ -5497,8 +5497,8 @@ static inline Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor | |
static inline Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { | |
return detail::infer_type(self).nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-static inline Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
- return detail::infer_type(self).smooth_l1_loss_out(output, self, target, reduction); | |
+static inline Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { | |
+ return detail::infer_type(self).smooth_l1_loss_out(out, self, target, reduction); | |
} | |
static inline Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).smooth_l1_loss(self, target, reduction); | |
@@ -5509,8 +5509,8 @@ static inline Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Te | |
static inline Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).smooth_l1_loss_backward(grad_output, self, target, reduction); | |
} | |
-static inline Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
- return detail::infer_type(self).soft_margin_loss_out(output, self, target, reduction); | |
+static inline Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { | |
+ return detail::infer_type(self).soft_margin_loss_out(out, self, target, reduction); | |
} | |
static inline Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).soft_margin_loss(self, target, reduction); | |
@@ -5521,23 +5521,23 @@ static inline Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const | |
static inline Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { | |
return detail::infer_type(self).soft_margin_loss_backward(grad_output, self, target, reduction); | |
} | |
-static inline Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
- return detail::infer_type(self).elu_out(output, self, alpha, scale, input_scale); | |
+static inline Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
+ return detail::infer_type(self).elu_out(out, self, alpha, scale, input_scale); | |
} | |
static inline Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
return detail::infer_type(self).elu(self, alpha, scale, input_scale); | |
} | |
-static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
- return detail::infer_type(grad_input).elu_backward_out(grad_input, grad_output, alpha, scale, input_scale, output); | |
+static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) { | |
+ return detail::infer_type(grad_input).elu_backward_out(grad_input, grad_output, alpha, scale, input_scale, out); | |
} | |
-static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { | |
- return detail::infer_type(grad_output).elu_backward(grad_output, alpha, scale, input_scale, output); | |
+static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) { | |
+ return detail::infer_type(grad_output).elu_backward(grad_output, alpha, scale, input_scale, out); | |
} | |
static inline Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
return detail::infer_type(self).elu_(self, alpha, scale, input_scale); | |
} | |
-static inline Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) { | |
- return detail::infer_type(self).glu_out(output, self, dim); | |
+static inline Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) { | |
+ return detail::infer_type(self).glu_out(out, self, dim); | |
} | |
static inline Tensor glu(const Tensor & self, int64_t dim) { | |
return detail::infer_type(self).glu(self, dim); | |
@@ -5548,8 +5548,8 @@ static inline Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad | |
static inline Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) { | |
return detail::infer_type(self).glu_backward(grad_output, self, dim); | |
} | |
-static inline Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) { | |
- return detail::infer_type(self).hardtanh_out(output, self, min_val, max_val); | |
+static inline Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) { | |
+ return detail::infer_type(self).hardtanh_out(out, self, min_val, max_val); | |
} | |
static inline Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) { | |
return detail::infer_type(self).hardtanh(self, min_val, max_val); | |
@@ -5563,8 +5563,8 @@ static inline Tensor hardtanh_backward(const Tensor & grad_output, const Tensor | |
static inline Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) { | |
return detail::infer_type(self).hardtanh_(self, min_val, max_val); | |
} | |
-static inline Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) { | |
- return detail::infer_type(self).leaky_relu_out(output, self, negative_slope); | |
+static inline Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) { | |
+ return detail::infer_type(self).leaky_relu_out(out, self, negative_slope); | |
} | |
static inline Tensor leaky_relu(const Tensor & self, Scalar negative_slope) { | |
return detail::infer_type(self).leaky_relu(self, negative_slope); | |
@@ -5578,8 +5578,8 @@ static inline Tensor leaky_relu_backward(const Tensor & grad_output, const Tenso | |
static inline Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) { | |
return detail::infer_type(self).leaky_relu_(self, negative_slope); | |
} | |
-static inline Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) { | |
- return detail::infer_type(self).log_sigmoid_out(output, self); | |
+static inline Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) { | |
+ return detail::infer_type(self).log_sigmoid_out(out, self); | |
} | |
static inline Tensor log_sigmoid(const Tensor & self) { | |
return detail::infer_type(self).log_sigmoid(self); | |
@@ -5596,8 +5596,8 @@ static inline Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tenso | |
static inline Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) { | |
return detail::infer_type(self).log_sigmoid_backward(grad_output, self, buffer); | |
} | |
-static inline Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
- return detail::infer_type(self).rrelu_with_noise_out(output, self, noise, lower, upper, training, generator); | |
+static inline Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
+ return detail::infer_type(self).rrelu_with_noise_out(out, self, noise, lower, upper, training, generator); | |
} | |
static inline Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
return detail::infer_type(self).rrelu_with_noise(self, noise, lower, upper, training, generator); | |
@@ -5611,20 +5611,20 @@ static inline Tensor rrelu_with_noise_backward(const Tensor & grad_output, const | |
static inline Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { | |
return detail::infer_type(self).rrelu_with_noise_(self, noise, lower, upper, training, generator); | |
} | |
-static inline Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) { | |
- return detail::infer_type(self).softplus_out(output, self, beta, threshold); | |
+static inline Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) { | |
+ return detail::infer_type(self).softplus_out(out, self, beta, threshold); | |
} | |
static inline Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) { | |
return detail::infer_type(self).softplus(self, beta, threshold); | |
} | |
-static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) { | |
- return detail::infer_type(self).softplus_backward_out(grad_input, grad_output, self, beta, threshold, output); | |
+static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) { | |
+ return detail::infer_type(self).softplus_backward_out(grad_input, grad_output, self, beta, threshold, out); | |
} | |
-static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) { | |
- return detail::infer_type(self).softplus_backward(grad_output, self, beta, threshold, output); | |
+static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) { | |
+ return detail::infer_type(self).softplus_backward(grad_output, self, beta, threshold, out); | |
} | |
-static inline Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) { | |
- return detail::infer_type(self).softshrink_out(output, self, lambd); | |
+static inline Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) { | |
+ return detail::infer_type(self).softshrink_out(out, self, lambd); | |
} | |
static inline Tensor softshrink(const Tensor & self, Scalar lambd) { | |
return detail::infer_type(self).softshrink(self, lambd); | |
@@ -5635,8 +5635,8 @@ static inline Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor | |
static inline Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) { | |
return detail::infer_type(self).softshrink_backward(grad_output, self, lambd); | |
} | |
-static inline Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) { | |
- return detail::infer_type(self).adaptive_avg_pool2d_out(output, self, output_size); | |
+static inline Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { | |
+ return detail::infer_type(self).adaptive_avg_pool2d_out(out, self, output_size); | |
} | |
static inline Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) { | |
return detail::infer_type(self).adaptive_avg_pool2d(self, output_size); | |
@@ -5647,8 +5647,8 @@ static inline Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef outpu | |
static inline Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) { | |
return detail::infer_type(self)._adaptive_avg_pool2d_backward(grad_output, self); | |
} | |
-static inline Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) { | |
- return detail::infer_type(self).adaptive_avg_pool3d_out(output, self, output_size); | |
+static inline Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { | |
+ return detail::infer_type(self).adaptive_avg_pool3d_out(out, self, output_size); | |
} | |
static inline Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) { | |
return detail::infer_type(self).adaptive_avg_pool3d(self, output_size); | |
@@ -5683,8 +5683,8 @@ static inline Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, con | |
static inline Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) { | |
return detail::infer_type(self).adaptive_max_pool3d_backward(grad_output, self, indices); | |
} | |
-static inline Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
- return detail::infer_type(self).avg_pool2d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+static inline Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
+ return detail::infer_type(self).avg_pool2d_out(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
static inline Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
return detail::infer_type(self).avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
@@ -5695,8 +5695,8 @@ static inline Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor | |
static inline Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
return detail::infer_type(self).avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
-static inline Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
- return detail::infer_type(self).avg_pool3d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+static inline Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
+ return detail::infer_type(self).avg_pool3d_out(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
static inline Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { | |
return detail::infer_type(self).avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
@@ -5755,8 +5755,8 @@ static inline Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, | |
static inline Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) { | |
return detail::infer_type(self).max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); | |
} | |
-static inline Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { | |
- return detail::infer_type(self).max_unpool2d_out(output, self, indices, output_size); | |
+static inline Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { | |
+ return detail::infer_type(self).max_unpool2d_out(out, self, indices, output_size); | |
} | |
static inline Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) { | |
return detail::infer_type(self).max_unpool2d(self, indices, output_size); | |
@@ -5767,8 +5767,8 @@ static inline Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tens | |
static inline Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { | |
return detail::infer_type(self).max_unpool2d_backward(grad_output, self, indices, output_size); | |
} | |
-static inline Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { | |
- return detail::infer_type(self).max_unpool3d_out(output, self, indices, output_size, stride, padding); | |
+static inline Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { | |
+ return detail::infer_type(self).max_unpool3d_out(out, self, indices, output_size, stride, padding); | |
} | |
static inline Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { | |
return detail::infer_type(self).max_unpool3d(self, indices, output_size, stride, padding); | |
@@ -5779,8 +5779,8 @@ static inline Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tens | |
static inline Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { | |
return detail::infer_type(self).max_unpool3d_backward(grad_output, self, indices, output_size, stride, padding); | |
} | |
-static inline Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) { | |
- return detail::infer_type(self).reflection_pad1d_out(output, self, padding); | |
+static inline Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { | |
+ return detail::infer_type(self).reflection_pad1d_out(out, self, padding); | |
} | |
static inline Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).reflection_pad1d(self, padding); | |
@@ -5791,8 +5791,8 @@ static inline Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const | |
static inline Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).reflection_pad1d_backward(grad_output, self, padding); | |
} | |
-static inline Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) { | |
- return detail::infer_type(self).reflection_pad2d_out(output, self, padding); | |
+static inline Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { | |
+ return detail::infer_type(self).reflection_pad2d_out(out, self, padding); | |
} | |
static inline Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).reflection_pad2d(self, padding); | |
@@ -5803,8 +5803,8 @@ static inline Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const | |
static inline Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).reflection_pad2d_backward(grad_output, self, padding); | |
} | |
-static inline Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) { | |
- return detail::infer_type(self).replication_pad1d_out(output, self, padding); | |
+static inline Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { | |
+ return detail::infer_type(self).replication_pad1d_out(out, self, padding); | |
} | |
static inline Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad1d(self, padding); | |
@@ -5815,8 +5815,8 @@ static inline Tensor & replication_pad1d_backward_out(Tensor & grad_input, const | |
static inline Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad1d_backward(grad_output, self, padding); | |
} | |
-static inline Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) { | |
- return detail::infer_type(self).replication_pad2d_out(output, self, padding); | |
+static inline Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { | |
+ return detail::infer_type(self).replication_pad2d_out(out, self, padding); | |
} | |
static inline Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad2d(self, padding); | |
@@ -5827,8 +5827,8 @@ static inline Tensor & replication_pad2d_backward_out(Tensor & grad_input, const | |
static inline Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad2d_backward(grad_output, self, padding); | |
} | |
-static inline Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) { | |
- return detail::infer_type(self).replication_pad3d_out(output, self, padding); | |
+static inline Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { | |
+ return detail::infer_type(self).replication_pad3d_out(out, self, padding); | |
} | |
static inline Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad3d(self, padding); | |
@@ -5839,8 +5839,8 @@ static inline Tensor & replication_pad3d_backward_out(Tensor & grad_input, const | |
static inline Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { | |
return detail::infer_type(self).replication_pad3d_backward(grad_output, self, padding); | |
} | |
-static inline Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
- return detail::infer_type(self).upsample_linear1d_out(output, self, output_size, align_corners); | |
+static inline Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
+ return detail::infer_type(self).upsample_linear1d_out(out, self, output_size, align_corners); | |
} | |
static inline Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
return detail::infer_type(self).upsample_linear1d(self, output_size, align_corners); | |
@@ -5851,8 +5851,8 @@ static inline Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const | |
static inline Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { | |
return detail::infer_type(grad_output).upsample_linear1d_backward(grad_output, output_size, input_size, align_corners); | |
} | |
-static inline Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
- return detail::infer_type(self).upsample_bilinear2d_out(output, self, output_size, align_corners); | |
+static inline Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
+ return detail::infer_type(self).upsample_bilinear2d_out(out, self, output_size, align_corners); | |
} | |
static inline Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
return detail::infer_type(self).upsample_bilinear2d(self, output_size, align_corners); | |
@@ -5863,8 +5863,8 @@ static inline Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, con | |
static inline Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { | |
return detail::infer_type(grad_output).upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners); | |
} | |
-static inline Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
- return detail::infer_type(self).upsample_bicubic2d_out(output, self, output_size, align_corners); | |
+static inline Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
+ return detail::infer_type(self).upsample_bicubic2d_out(out, self, output_size, align_corners); | |
} | |
static inline Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
return detail::infer_type(self).upsample_bicubic2d(self, output_size, align_corners); | |
@@ -5875,8 +5875,8 @@ static inline Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, cons | |
static inline Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { | |
return detail::infer_type(grad_output).upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners); | |
} | |
-static inline Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
- return detail::infer_type(self).upsample_trilinear3d_out(output, self, output_size, align_corners); | |
+static inline Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
+ return detail::infer_type(self).upsample_trilinear3d_out(out, self, output_size, align_corners); | |
} | |
static inline Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) { | |
return detail::infer_type(self).upsample_trilinear3d(self, output_size, align_corners); | |
@@ -5887,8 +5887,8 @@ static inline Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, co | |
static inline Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { | |
return detail::infer_type(grad_output).upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners); | |
} | |
-static inline Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) { | |
- return detail::infer_type(self).upsample_nearest1d_out(output, self, output_size); | |
+static inline Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { | |
+ return detail::infer_type(self).upsample_nearest1d_out(out, self, output_size); | |
} | |
static inline Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) { | |
return detail::infer_type(self).upsample_nearest1d(self, output_size); | |
@@ -5899,8 +5899,8 @@ static inline Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, cons | |
static inline Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { | |
return detail::infer_type(grad_output).upsample_nearest1d_backward(grad_output, output_size, input_size); | |
} | |
-static inline Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) { | |
- return detail::infer_type(self).upsample_nearest2d_out(output, self, output_size); | |
+static inline Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { | |
+ return detail::infer_type(self).upsample_nearest2d_out(out, self, output_size); | |
} | |
static inline Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) { | |
return detail::infer_type(self).upsample_nearest2d(self, output_size); | |
@@ -5911,8 +5911,8 @@ static inline Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, cons | |
static inline Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { | |
return detail::infer_type(grad_output).upsample_nearest2d_backward(grad_output, output_size, input_size); | |
} | |
-static inline Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) { | |
- return detail::infer_type(self).upsample_nearest3d_out(output, self, output_size); | |
+static inline Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { | |
+ return detail::infer_type(self).upsample_nearest3d_out(out, self, output_size); | |
} | |
static inline Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) { | |
return detail::infer_type(self).upsample_nearest3d(self, output_size); | |
@@ -5923,20 +5923,20 @@ static inline Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, cons | |
static inline Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { | |
return detail::infer_type(grad_output).upsample_nearest3d_backward(grad_output, output_size, input_size); | |
} | |
-static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) { | |
- return detail::infer_type(grad_input).sigmoid_backward_out(grad_input, grad_output, output); | |
+static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) { | |
+ return detail::infer_type(grad_input).sigmoid_backward_out(grad_input, grad_output, out); | |
} | |
-static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) { | |
- return detail::infer_type(grad_output).sigmoid_backward(grad_output, output); | |
+static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) { | |
+ return detail::infer_type(grad_output).sigmoid_backward(grad_output, out); | |
} | |
-static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) { | |
- return detail::infer_type(grad_input).tanh_backward_out(grad_input, grad_output, output); | |
+static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) { | |
+ return detail::infer_type(grad_input).tanh_backward_out(grad_input, grad_output, out); | |
} | |
-static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) { | |
- return detail::infer_type(grad_output).tanh_backward(grad_output, output); | |
+static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) { | |
+ return detail::infer_type(grad_output).tanh_backward(grad_output, out); | |
} | |
-static inline Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_transpose2d_out(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
+static inline Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_transpose2d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
} | |
static inline Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
@@ -5953,8 +5953,8 @@ static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backw | |
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
return detail::infer_type(self).thnn_conv_transpose2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask); | |
} | |
-static inline Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_transpose3d_out(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
+static inline Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_transpose3d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
} | |
static inline Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
@@ -5971,8 +5971,8 @@ static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backw | |
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
return detail::infer_type(self).thnn_conv_transpose3d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask); | |
} | |
-static inline Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
- return detail::infer_type(self).thnn_conv2d_out(output, self, weight, kernel_size, bias, stride, padding); | |
+static inline Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
+ return detail::infer_type(self).thnn_conv2d_out(out, self, weight, kernel_size, bias, stride, padding); | |
} | |
static inline Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
return detail::infer_type(self).thnn_conv2d(self, weight, kernel_size, bias, stride, padding); | |
@@ -5989,14 +5989,14 @@ static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Te | |
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
return detail::infer_type(self).thnn_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); | |
} | |
-static inline Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_depthwise2d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+static inline Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_depthwise2d_out(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
static inline Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
-static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_depthwise2d_forward_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_depthwise2d_forward_out(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
static inline Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -6007,8 +6007,8 @@ static inline std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(T | |
static inline std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) { | |
return detail::infer_type(self).thnn_conv_depthwise2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); | |
} | |
-static inline Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
- return detail::infer_type(self).thnn_conv3d_out(output, self, weight, kernel_size, bias, stride, padding); | |
+static inline Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
+ return detail::infer_type(self).thnn_conv3d_out(out, self, weight, kernel_size, bias, stride, padding); | |
} | |
static inline Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { | |
return detail::infer_type(self).thnn_conv3d(self, weight, kernel_size, bias, stride, padding); | |
@@ -6025,8 +6025,8 @@ static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Te | |
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { | |
return detail::infer_type(self).thnn_conv3d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); | |
} | |
-static inline Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_dilated2d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+static inline Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_dilated2d_out(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
static inline Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -6043,8 +6043,8 @@ static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backwar | |
static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { | |
return detail::infer_type(self).thnn_conv_dilated2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask); | |
} | |
-static inline Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
- return detail::infer_type(self).thnn_conv_dilated3d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+static inline Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
+ return detail::infer_type(self).thnn_conv_dilated3d_out(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
static inline Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
return detail::infer_type(self).thnn_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation); | |
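Every hunk above follows one mechanical pattern: the out-argument of each `_out` variant (and the saved forward result threaded into backwards such as `elu_backward` and `sigmoid_backward`) is renamed from `output` to `out`, and the forwarded call is updated to match. Because C++ binds arguments by position rather than by name, a parameter rename with unchanged types and order cannot alter behavior or break call sites. Below is a minimal self-contained sketch of that pattern — using a hypothetical stand-in `Tensor`, not `at::Tensor`, and an illustrative `relu_out` that is not part of this patch:

// Minimal sketch (hypothetical stand-in types; NOT ATen code) of why the
// output -> out rename in the hunks above is behavior-preserving: callers
// pass arguments by position and never see the parameter's name.
#include <iostream>
#include <vector>

struct Tensor {                  // stand-in for at::Tensor
    std::vector<float> data;
};

// Before the patch this parameter would have been spelled `output`;
// afterwards, `out`. The type (Tensor &) and its position are unchanged,
// so every call site and function-pointer type remains valid.
Tensor & relu_out(Tensor & out, const Tensor & self) {
    out.data.resize(self.data.size());
    for (size_t i = 0; i < self.data.size(); ++i)
        out.data[i] = self.data[i] > 0.0f ? self.data[i] : 0.0f;
    return out;                  // _out variants return the out tensor by reference
}

int main() {
    Tensor x{{-1.0f, 2.0f, -3.0f}};
    Tensor result;
    relu_out(result, x);         // unaffected by the parameter rename
    for (float v : result.data) std::cout << v << ' ';
    std::cout << '\n';           // prints: 0 2 0
}

Returning the out tensor by reference mirrors the `Tensor &` signatures throughout the diff, which lets out-variant calls chain and avoids a copy of the destination tensor.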
diff --git a/build/aten/src/ATen/MSNPUType.cpp b/build/aten/src/ATen/MSNPUType.cpp | |
index 1a61620ea..863dfcc3f 100644 | |
--- a/build/aten/src/ATen/MSNPUType.cpp | |
+++ b/build/aten/src/ATen/MSNPUType.cpp | |
@@ -3238,8 +3238,8 @@ Tensor MSNPUType::zeros_like(const Tensor & self) const { | |
Tensor MSNPUType::zeros_like(const Tensor & self, const TensorOptions & options) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const TensorOptions &)>("zeros_like(Tensor self, TensorOptions options) -> Tensor")(self, options); | |
} | |
-Tensor MSNPUType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
- return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_standard_gamma_grad(Tensor self, Tensor output) -> Tensor")(self, output); | |
+Tensor MSNPUType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_standard_gamma_grad(Tensor self, Tensor out) -> Tensor")(self, out); | |
} | |
Tensor MSNPUType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Generator *)>("_standard_gamma(Tensor self, Generator * generator) -> Tensor")(self, generator); | |
@@ -4243,20 +4243,20 @@ Tensor & MSNPUType::pow_out(Tensor & out, Scalar self, const Tensor & exponent) | |
Tensor MSNPUType::pow(Scalar self, const Tensor & exponent) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(Scalar, const Tensor &)>("pow(Scalar self, Tensor exponent) -> Tensor")(self, exponent); | |
} | |
-Tensor & MSNPUType::normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, double, Generator *)>("normal_out(Tensor output, Tensor mean, double std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & MSNPUType::normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, double, Generator *)>("normal_out(Tensor out, Tensor mean, double std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor MSNPUType::normal(const Tensor & mean, double std, Generator * generator) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, double, Generator *)>("normal(Tensor mean, double std, Generator * generator) -> Tensor")(mean, std, generator); | |
} | |
-Tensor & MSNPUType::normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, double, const Tensor &, Generator *)>("normal_out(Tensor output, double mean, Tensor std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & MSNPUType::normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, double, const Tensor &, Generator *)>("normal_out(Tensor out, double mean, Tensor std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor MSNPUType::normal(double mean, const Tensor & std, Generator * generator) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(double, const Tensor &, Generator *)>("normal(double mean, Tensor std, Generator * generator) -> Tensor")(mean, std, generator); | |
} | |
-Tensor & MSNPUType::normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Generator *)>("normal_out(Tensor output, Tensor mean, Tensor std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & MSNPUType::normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Generator *)>("normal_out(Tensor out, Tensor mean, Tensor std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor MSNPUType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Generator *)>("normal(Tensor mean, Tensor std, Generator * generator) -> Tensor")(mean, std, generator); | |
@@ -4264,14 +4264,14 @@ Tensor MSNPUType::normal(const Tensor & mean, const Tensor & std, Generator * ge
Tensor MSNPUType::alias(const Tensor & self) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &)>("alias(Tensor self) -> Tensor")(self);
}
-Tensor & MSNPUType::_dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad_out(Tensor output, Tensor x, Tensor alpha, Tensor total) -> Tensor")(output, x, alpha, total);
+Tensor & MSNPUType::_dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad_out(Tensor out, Tensor x, Tensor alpha, Tensor total) -> Tensor")(out, x, alpha, total);
}
Tensor MSNPUType::_dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor")(x, alpha, total);
}
-Tensor & MSNPUType::binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(output, self, target, weight, reduction);
+Tensor & MSNPUType::binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(out, self, target, weight, reduction);
}
Tensor MSNPUType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy(Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(self, target, weight, reduction);
@@ -4282,8 +4282,8 @@ Tensor & MSNPUType::binary_cross_entropy_backward_out(Tensor & grad_input, const
Tensor MSNPUType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(grad_output, self, target, weight, reduction);
}
-Tensor & MSNPUType::mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & MSNPUType::mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor MSNPUType::mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("mse_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4294,8 +4294,8 @@ Tensor & MSNPUType::mse_loss_backward_out(Tensor & grad_input, const Tensor & gr
Tensor MSNPUType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & MSNPUType::l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & MSNPUType::l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor MSNPUType::l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("l1_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4306,8 +4306,8 @@ Tensor & MSNPUType::l1_loss_backward_out(Tensor & grad_input, const Tensor & gra
Tensor MSNPUType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & MSNPUType::multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_out(Tensor output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(output, self, target, p, margin, weight, reduction);
+Tensor & MSNPUType::multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_out(Tensor out, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(out, self, target, p, margin, weight, reduction);
}
Tensor MSNPUType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss(Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(self, target, p, margin, weight, reduction);
@@ -4318,8 +4318,8 @@ Tensor & MSNPUType::multi_margin_loss_backward_out(Tensor & grad_input, const Te
Tensor MSNPUType::multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(grad_output, self, target, p, margin, weight, reduction);
}
-Tensor & MSNPUType::multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & MSNPUType::multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor MSNPUType::multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4336,8 +4336,8 @@ Tensor & MSNPUType::multilabel_margin_loss_backward_out(Tensor & grad_input, con
Tensor MSNPUType::multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, const Tensor &)>("multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction, Tensor is_target) -> Tensor")(grad_output, self, target, reduction, is_target);
}
-Tensor & MSNPUType::nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(output, self, target, weight, reduction, ignore_index);
+Tensor & MSNPUType::nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(out, self, target, weight, reduction, ignore_index);
}
Tensor MSNPUType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss(Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(self, target, weight, reduction, ignore_index);
@@ -4354,8 +4354,8 @@ Tensor & MSNPUType::nll_loss_backward_out(Tensor & grad_input, const Tensor & gr
Tensor MSNPUType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &)>("nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index, Tensor total_weight) -> Tensor")(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
-Tensor & MSNPUType::nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(output, self, target, weight, reduction, ignore_index);
+Tensor & MSNPUType::nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(out, self, target, weight, reduction, ignore_index);
}
Tensor MSNPUType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d(Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(self, target, weight, reduction, ignore_index);
@@ -4372,8 +4372,8 @@ Tensor & MSNPUType::nll_loss2d_backward_out(Tensor & grad_input, const Tensor &
Tensor MSNPUType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &)>("nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index, Tensor total_weight) -> Tensor")(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
-Tensor & MSNPUType::smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & MSNPUType::smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor MSNPUType::smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4384,8 +4384,8 @@ Tensor & MSNPUType::smooth_l1_loss_backward_out(Tensor & grad_input, const Tenso
Tensor MSNPUType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & MSNPUType::soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & MSNPUType::soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor MSNPUType::soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("soft_margin_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4396,23 +4396,23 @@ Tensor & MSNPUType::soft_margin_loss_backward_out(Tensor & grad_input, const Ten
Tensor MSNPUType::soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & MSNPUType::elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar)>("elu_out(Tensor output, Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(output, self, alpha, scale, input_scale);
+Tensor & MSNPUType::elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar)>("elu_out(Tensor out, Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(out, self, alpha, scale, input_scale);
}
Tensor MSNPUType::elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar)>("elu(Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(self, alpha, scale, input_scale);
}
-Tensor & MSNPUType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward_out(Tensor grad_input, Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor")(grad_input, grad_output, alpha, scale, input_scale, output);
+Tensor & MSNPUType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward_out(Tensor grad_input, Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor out) -> Tensor")(grad_input, grad_output, alpha, scale, input_scale, out);
}
-Tensor MSNPUType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor")(grad_output, alpha, scale, input_scale, output);
+Tensor MSNPUType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor out) -> Tensor")(grad_output, alpha, scale, input_scale, out);
}
Tensor & MSNPUType::elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar, Scalar, Scalar)>("elu_(Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(self, alpha, scale, input_scale);
}
-Tensor & MSNPUType::glu_out(Tensor & output, const Tensor & self, int64_t dim) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, int64_t)>("glu_out(Tensor output, Tensor self, int64_t dim) -> Tensor")(output, self, dim);
+Tensor & MSNPUType::glu_out(Tensor & out, const Tensor & self, int64_t dim) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, int64_t)>("glu_out(Tensor out, Tensor self, int64_t dim) -> Tensor")(out, self, dim);
}
Tensor MSNPUType::glu(const Tensor & self, int64_t dim) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, int64_t)>("glu(Tensor self, int64_t dim) -> Tensor")(self, dim);
@@ -4423,8 +4423,8 @@ Tensor & MSNPUType::glu_backward_out(Tensor & grad_input, const Tensor & grad_ou
Tensor MSNPUType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("glu_backward(Tensor grad_output, Tensor self, int64_t dim) -> Tensor")(grad_output, self, dim);
}
-Tensor & MSNPUType::hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("hardtanh_out(Tensor output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(output, self, min_val, max_val);
+Tensor & MSNPUType::hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("hardtanh_out(Tensor out, Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(out, self, min_val, max_val);
}
Tensor MSNPUType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar)>("hardtanh(Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(self, min_val, max_val);
@@ -4438,8 +4438,8 @@ Tensor MSNPUType::hardtanh_backward(const Tensor & grad_output, const Tensor & s
Tensor & MSNPUType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar, Scalar)>("hardtanh_(Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(self, min_val, max_val);
}
-Tensor & MSNPUType::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("leaky_relu_out(Tensor output, Tensor self, Scalar negative_slope) -> Tensor")(output, self, negative_slope);
+Tensor & MSNPUType::leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("leaky_relu_out(Tensor out, Tensor self, Scalar negative_slope) -> Tensor")(out, self, negative_slope);
}
Tensor MSNPUType::leaky_relu(const Tensor & self, Scalar negative_slope) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar)>("leaky_relu(Tensor self, Scalar negative_slope) -> Tensor")(self, negative_slope);
@@ -4453,8 +4453,8 @@ Tensor MSNPUType::leaky_relu_backward(const Tensor & grad_output, const Tensor &
Tensor & MSNPUType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar)>("leaky_relu_(Tensor self, Scalar negative_slope) -> Tensor")(self, negative_slope);
}
-Tensor & MSNPUType::log_sigmoid_out(Tensor & output, const Tensor & self) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &)>("log_sigmoid_out(Tensor output, Tensor self) -> Tensor")(output, self);
+Tensor & MSNPUType::log_sigmoid_out(Tensor & out, const Tensor & self) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &)>("log_sigmoid_out(Tensor out, Tensor self) -> Tensor")(out, self);
}
Tensor MSNPUType::log_sigmoid(const Tensor & self) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &)>("log_sigmoid(Tensor self) -> Tensor")(self);
@@ -4471,8 +4471,8 @@ Tensor & MSNPUType::log_sigmoid_backward_out(Tensor & grad_input, const Tensor &
Tensor MSNPUType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor")(grad_output, self, buffer);
}
-Tensor & MSNPUType::rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_out(Tensor output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(output, self, noise, lower, upper, training, generator);
+Tensor & MSNPUType::rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_out(Tensor out, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(out, self, noise, lower, upper, training, generator);
}
Tensor MSNPUType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(self, noise, lower, upper, training, generator);
@@ -4486,20 +4486,20 @@ Tensor MSNPUType::rrelu_with_noise_backward(const Tensor & grad_output, const Te
Tensor & MSNPUType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(self, noise, lower, upper, training, generator);
}
-Tensor & MSNPUType::softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("softplus_out(Tensor output, Tensor self, Scalar beta, Scalar threshold) -> Tensor")(output, self, beta, threshold);
+Tensor & MSNPUType::softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("softplus_out(Tensor out, Tensor self, Scalar beta, Scalar threshold) -> Tensor")(out, self, beta, threshold);
}
Tensor MSNPUType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar)>("softplus(Tensor self, Scalar beta, Scalar threshold) -> Tensor")(self, beta, threshold);
}
-Tensor & MSNPUType::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward_out(Tensor grad_input, Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor")(grad_input, grad_output, self, beta, threshold, output);
+Tensor & MSNPUType::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward_out(Tensor grad_input, Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor out) -> Tensor")(grad_input, grad_output, self, beta, threshold, out);
}
-Tensor MSNPUType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor")(grad_output, self, beta, threshold, output);
+Tensor MSNPUType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor out) -> Tensor")(grad_output, self, beta, threshold, out);
}
-Tensor & MSNPUType::softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("softshrink_out(Tensor output, Tensor self, Scalar lambd) -> Tensor")(output, self, lambd);
+Tensor & MSNPUType::softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("softshrink_out(Tensor out, Tensor self, Scalar lambd) -> Tensor")(out, self, lambd);
}
Tensor MSNPUType::softshrink(const Tensor & self, Scalar lambd) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar)>("softshrink(Tensor self, Scalar lambd) -> Tensor")(self, lambd);
@@ -4510,8 +4510,8 @@ Tensor & MSNPUType::softshrink_backward_out(Tensor & grad_input, const Tensor &
Tensor MSNPUType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar)>("softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor")(grad_output, self, lambd);
}
-Tensor & MSNPUType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool2d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & MSNPUType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool2d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor MSNPUType::adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("adaptive_avg_pool2d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4522,8 +4522,8 @@ Tensor MSNPUType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_s
Tensor MSNPUType::_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor")(grad_output, self);
}
-Tensor & MSNPUType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool3d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & MSNPUType::adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool3d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor MSNPUType::adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("adaptive_avg_pool3d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4558,8 +4558,8 @@ Tensor & MSNPUType::adaptive_max_pool3d_backward_out(Tensor & grad_input, const
Tensor MSNPUType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")(grad_output, self, indices);
}
-Tensor & MSNPUType::avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_out(Tensor output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+Tensor & MSNPUType::avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_out(Tensor out, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor MSNPUType::avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d(Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
@@ -4570,8 +4570,8 @@ Tensor & MSNPUType::avg_pool2d_backward_out(Tensor & grad_input, const Tensor &
Tensor MSNPUType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_backward(Tensor grad_output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
-Tensor & MSNPUType::avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d_out(Tensor output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+Tensor & MSNPUType::avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d_out(Tensor out, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor MSNPUType::avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d(Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
@@ -4630,8 +4630,8 @@ Tensor & MSNPUType::max_pool3d_with_indices_backward_out(Tensor & grad_input, co
Tensor MSNPUType::max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &)>("max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor indices) -> Tensor")(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
-Tensor & MSNPUType::max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_out(Tensor output, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(output, self, indices, output_size);
+Tensor & MSNPUType::max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_out(Tensor out, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(out, self, indices, output_size);
}
Tensor MSNPUType::max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d(Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(self, indices, output_size);
@@ -4642,8 +4642,8 @@ Tensor & MSNPUType::max_unpool2d_backward_out(Tensor & grad_input, const Tensor
Tensor MSNPUType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(grad_output, self, indices, output_size);
}
-Tensor & MSNPUType::max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_out(Tensor output, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, indices, output_size, stride, padding);
+Tensor & MSNPUType::max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_out(Tensor out, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, indices, output_size, stride, padding);
}
Tensor MSNPUType::max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d(Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, indices, output_size, stride, padding);
@@ -4654,8 +4654,8 @@ Tensor & MSNPUType::max_unpool3d_backward_out(Tensor & grad_input, const Tensor
Tensor MSNPUType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(grad_output, self, indices, output_size, stride, padding);
}
-Tensor & MSNPUType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & MSNPUType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor MSNPUType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("reflection_pad1d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4666,8 +4666,8 @@ Tensor & MSNPUType::reflection_pad1d_backward_out(Tensor & grad_input, const Ten | |
Tensor MSNPUType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding); | |
} | |
-Tensor & MSNPUType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding); | |
+Tensor & MSNPUType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding); | |
} | |
Tensor MSNPUType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("reflection_pad2d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding); | |
@@ -4678,8 +4678,8 @@ Tensor & MSNPUType::reflection_pad2d_backward_out(Tensor & grad_input, const Ten | |
Tensor MSNPUType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding); | |
} | |
-Tensor & MSNPUType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding); | |
+Tensor & MSNPUType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding); | |
} | |
Tensor MSNPUType::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad1d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding); | |
@@ -4690,8 +4690,8 @@ Tensor & MSNPUType::replication_pad1d_backward_out(Tensor & grad_input, const Te | |
Tensor MSNPUType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding); | |
} | |
-Tensor & MSNPUType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding); | |
+Tensor & MSNPUType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding); | |
} | |
Tensor MSNPUType::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad2d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding); | |
@@ -4702,8 +4702,8 @@ Tensor & MSNPUType::replication_pad2d_backward_out(Tensor & grad_input, const Te | |
Tensor MSNPUType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding); | |
} | |
-Tensor & MSNPUType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding); | |
+Tensor & MSNPUType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding); | |
} | |
Tensor MSNPUType::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad3d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding); | |
@@ -4714,8 +4714,8 @@ Tensor & MSNPUType::replication_pad3d_backward_out(Tensor & grad_input, const Te | |
Tensor MSNPUType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding); | |
} | |
-Tensor & MSNPUType::upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_linear1d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners); | |
+Tensor & MSNPUType::upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_linear1d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners); | |
} | |
Tensor MSNPUType::upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_linear1d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners); | |
@@ -4726,8 +4726,8 @@ Tensor & MSNPUType::upsample_linear1d_backward_out(Tensor & grad_input, const Te | |
Tensor MSNPUType::upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_linear1d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & MSNPUType::upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners); | |
+Tensor & MSNPUType::upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners); | |
} | |
Tensor MSNPUType::upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners); | |
@@ -4738,8 +4738,8 @@ Tensor & MSNPUType::upsample_bilinear2d_backward_out(Tensor & grad_input, const | |
Tensor MSNPUType::upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const { | |
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_bilinear2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & MSNPUType::upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & MSNPUType::upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor MSNPUType::upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4750,8 +4750,8 @@ Tensor & MSNPUType::upsample_bicubic2d_backward_out(Tensor & grad_input, const T
Tensor MSNPUType::upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_bicubic2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & MSNPUType::upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & MSNPUType::upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor MSNPUType::upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4762,8 +4762,8 @@ Tensor & MSNPUType::upsample_trilinear3d_backward_out(Tensor & grad_input, const
Tensor MSNPUType::upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_trilinear3d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & MSNPUType::upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest1d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & MSNPUType::upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest1d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor MSNPUType::upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest1d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4774,8 +4774,8 @@ Tensor & MSNPUType::upsample_nearest1d_backward_out(Tensor & grad_input, const T
Tensor MSNPUType::upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest1d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & MSNPUType::upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest2d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & MSNPUType::upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest2d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor MSNPUType::upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest2d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4786,8 +4786,8 @@ Tensor & MSNPUType::upsample_nearest2d_backward_out(Tensor & grad_input, const T
Tensor MSNPUType::upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & MSNPUType::upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest3d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & MSNPUType::upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest3d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor MSNPUType::upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest3d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4798,20 +4798,20 @@ Tensor & MSNPUType::upsample_nearest3d_backward_out(Tensor & grad_input, const T
Tensor MSNPUType::upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest3d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & MSNPUType::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("sigmoid_backward_out(Tensor grad_input, Tensor grad_output, Tensor output) -> Tensor")(grad_input, grad_output, output);
+Tensor & MSNPUType::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("sigmoid_backward_out(Tensor grad_input, Tensor grad_output, Tensor out) -> Tensor")(grad_input, grad_output, out);
}
-Tensor MSNPUType::sigmoid_backward(const Tensor & grad_output, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor")(grad_output, output);
+Tensor MSNPUType::sigmoid_backward(const Tensor & grad_output, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("sigmoid_backward(Tensor grad_output, Tensor out) -> Tensor")(grad_output, out);
}
-Tensor & MSNPUType::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("tanh_backward_out(Tensor grad_input, Tensor grad_output, Tensor output) -> Tensor")(grad_input, grad_output, output);
+Tensor & MSNPUType::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("tanh_backward_out(Tensor grad_input, Tensor grad_output, Tensor out) -> Tensor")(grad_input, grad_output, out);
}
-Tensor MSNPUType::tanh_backward(const Tensor & grad_output, const Tensor & output) const {
- return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("tanh_backward(Tensor grad_output, Tensor output) -> Tensor")(grad_output, output);
+Tensor MSNPUType::tanh_backward(const Tensor & grad_output, const Tensor & out) const {
+ return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("tanh_backward(Tensor grad_output, Tensor out) -> Tensor")(grad_output, out);
}
-Tensor & MSNPUType::thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+Tensor & MSNPUType::thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
Tensor MSNPUType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
@@ -4828,8 +4828,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> MSNPUType::thnn_conv_transpose2d_backward
std::tuple<Tensor,Tensor,Tensor> MSNPUType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_transpose2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor columns, Tensor ones, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask);
}
-Tensor & MSNPUType::thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+Tensor & MSNPUType::thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
Tensor MSNPUType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
@@ -4846,8 +4846,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> MSNPUType::thnn_conv_transpose3d_backward
std::tuple<Tensor,Tensor,Tensor> MSNPUType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_transpose3d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask);
}
-Tensor & MSNPUType::thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding);
+Tensor & MSNPUType::thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding);
}
Tensor MSNPUType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, weight, kernel_size, bias, stride, padding);
@@ -4864,14 +4864,14 @@ std::tuple<Tensor &,Tensor &,Tensor &> MSNPUType::thnn_conv2d_backward_out(Tenso
std::tuple<Tensor,Tensor,Tensor> MSNPUType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & MSNPUType::thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & MSNPUType::thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor MSNPUType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
}
-Tensor & MSNPUType::thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & MSNPUType::thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor MSNPUType::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
@@ -4882,8 +4882,8 @@ std::tuple<Tensor &,Tensor &> MSNPUType::thnn_conv_depthwise2d_backward_out(Tens
std::tuple<Tensor,Tensor> MSNPUType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array<bool,2>)>("thnn_conv_depthwise2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) -> std::tuple<Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
}
-Tensor & MSNPUType::thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding);
+Tensor & MSNPUType::thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding);
}
Tensor MSNPUType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, weight, kernel_size, bias, stride, padding);
@@ -4900,8 +4900,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> MSNPUType::thnn_conv3d_backward_out(Tenso
std::tuple<Tensor,Tensor,Tensor> MSNPUType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv3d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & MSNPUType::thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & MSNPUType::thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor MSNPUType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
@@ -4918,8 +4918,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> MSNPUType::thnn_conv_dilated2d_backward_o
std::tuple<Tensor,Tensor,Tensor> MSNPUType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
return MSNPUTypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor columns, Tensor ones, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
}
-Tensor & MSNPUType::thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & MSNPUType::thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return MSNPUTypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor MSNPUType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return MSNPUTypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
diff --git a/build/aten/src/ATen/MSNPUType.h b/build/aten/src/ATen/MSNPUType.h
index 87db78052..f7804da07 100644
--- a/build/aten/src/ATen/MSNPUType.h
+++ b/build/aten/src/ATen/MSNPUType.h
@@ -1107,7 +1107,7 @@ struct CAFFE2_API MSNPUType : public TypeDefault {
Tensor & zeros_out(Tensor & out, IntArrayRef size) const override;
Tensor zeros_like(const Tensor & self) const override;
Tensor zeros_like(const Tensor & self, const TensorOptions & options) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -1442,100 +1442,100 @@ struct CAFFE2_API MSNPUType : public TypeDefault {
Tensor pow(const Tensor & self, const Tensor & exponent) const override;
Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent) const override;
Tensor pow(Scalar self, const Tensor & exponent) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const override;
Tensor normal(const Tensor & mean, double std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const override;
Tensor normal(double mean, const Tensor & std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor alias(const Tensor & self) const override;
- Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
+ Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
- Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
+ Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
- Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
+ Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
- Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) const override;
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
- Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
- Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
- Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
+ Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
- Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
- Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
+ Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
+ Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
- Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) const override;
+ Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) const override;
Tensor glu(const Tensor & self, int64_t dim) const override;
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
- Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
+ Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const override;
- Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const override;
+ Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const override;
Tensor leaky_relu(const Tensor & self, Scalar negative_slope) const override;
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) const override;
- Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) const override;
+ Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) const override;
Tensor log_sigmoid(const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const override;
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) const override;
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override;
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override;
- Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
+ Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
- Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const override;
+ Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const override;
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) const override;
- Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const override;
+ Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const override;
Tensor softshrink(const Tensor & self, Scalar lambd) const override;
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
- Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const override;
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const override;
@@ -1547,11 +1547,11 @@ struct CAFFE2_API MSNPUType : public TypeDefault {
std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override;
Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override;
- Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
- Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
@@ -1571,103 +1571,103 @@ struct CAFFE2_API MSNPUType : public TypeDefault {
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) const override;
Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override;
Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override;
- Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
+ Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
- Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
- Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
- Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
+ Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
+ Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
- Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const override; | |
- Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
+ Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
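Note: every hunk in this header makes the same mechanical change, renaming the declared destination parameter from output to out. As a rough illustration of the _out convention these declarations share, here is a minimal self-contained C++ sketch with invented names (scale_out and scale are not ATen functions): the first argument is a caller-provided buffer that is filled in place and returned by reference, while the functional variant allocates a fresh result and forwards to it. Because C++ call sites pass these arguments positionally, a parameter rename like this one changes no C++ caller.

#include <cstddef>
#include <iostream>
#include <vector>

// Out variant: write into the caller-provided 'out' and return it by
// reference, mirroring the 'Tensor &' return type in the declarations above.
std::vector<float> & scale_out(std::vector<float> & out,
                               const std::vector<float> & self,
                               float factor) {
  out.resize(self.size());
  for (std::size_t i = 0; i < self.size(); ++i)
    out[i] = self[i] * factor;
  return out;
}

// Functional variant: allocate a fresh result and reuse the out variant.
std::vector<float> scale(const std::vector<float> & self, float factor) {
  std::vector<float> out;
  scale_out(out, self, factor);
  return out;
}

int main() {
  std::vector<float> in{1.0f, 2.0f, 3.0f};
  std::vector<float> buf;
  scale_out(buf, in, 2.0f);                  // buf now holds {2, 4, 6}
  for (float v : scale(in, 10.0f)) std::cout << v << ' ';
}

The same pairing (f_out writing into a buffer, f allocating) repeats throughout the declarations below.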
diff --git a/build/aten/src/ATen/NativeFunctions.h b/build/aten/src/ATen/NativeFunctions.h
index 145f766cd..97ebc515e 100644
--- a/build/aten/src/ATen/NativeFunctions.h
+++ b/build/aten/src/ATen/NativeFunctions.h
@@ -730,8 +730,8 @@ CAFFE2_API Tensor zeros(IntArrayRef size, const TensorOptions & options={});
CAFFE2_API Tensor & zeros_out(Tensor & out, IntArrayRef size);
CAFFE2_API Tensor zeros_like(const Tensor & self);
CAFFE2_API Tensor zeros_like(const Tensor & self, const TensorOptions & options);
-CAFFE2_API Tensor _standard_gamma_grad_cpu(const Tensor & self, const Tensor & output);
-CAFFE2_API Tensor _standard_gamma_grad_cuda(const Tensor & self, const Tensor & output);
+CAFFE2_API Tensor _standard_gamma_grad_cpu(const Tensor & self, const Tensor & out);
+CAFFE2_API Tensor _standard_gamma_grad_cuda(const Tensor & self, const Tensor & out);
CAFFE2_API Tensor _s_gamma_cpu(const Tensor & self, Generator * generator=nullptr);
CAFFE2_API Tensor _s_gamma_cuda(const Tensor & self, Generator * generator=nullptr);
CAFFE2_API Tensor _s_poisson_cpu(const Tensor & self, Generator * generator=nullptr);
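The _standard_gamma_grad_cpu / _standard_gamma_grad_cuda pair above is typical of how NativeFunctions.h splits one operator into per-backend entry points, which the generated Type classes (such as the CPUByteType methods earlier in this diff) forward to. A small self-contained sketch of that dispatch shape, with invented names and placeholder arithmetic standing in for the real kernel:

#include <iostream>

namespace native {
// Hypothetical stand-in for a backend-specific native function; the body is
// placeholder arithmetic, not the real gamma-gradient kernel.
double standard_gamma_grad_cpu(double self, double out) {
  return self + out;
}
}  // namespace native

// Virtual Type interface, analogous to the per-dtype Type classes whose
// overrides appear in the first file of this diff.
struct Type {
  virtual ~Type() = default;
  virtual double _standard_gamma_grad(double self, double out) const = 0;
};

// Backend subclass forwards to the free native function, just as
// CPUByteType::_standard_gamma_grad forwards to at::native::*_cpu.
struct CPUType : Type {
  double _standard_gamma_grad(double self, double out) const override {
    return native::standard_gamma_grad_cpu(/* actuals */ self, out);
  }
};

int main() {
  CPUType t;
  std::cout << t._standard_gamma_grad(1.5, 2.0) << '\n';  // prints 3.5
}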
@@ -1087,103 +1087,103 @@ CAFFE2_API Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & ex
CAFFE2_API Tensor pow(const Tensor & self, const Tensor & exponent);
CAFFE2_API Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent);
CAFFE2_API Tensor pow(Scalar self, const Tensor & exponent);
-CAFFE2_API Tensor & normal_out(Tensor & output, const Tensor & mean, double std=1, Generator * generator=nullptr);
+CAFFE2_API Tensor & normal_out(Tensor & out, const Tensor & mean, double std=1, Generator * generator=nullptr);
CAFFE2_API Tensor normal(const Tensor & mean, double std=1, Generator * generator=nullptr);
-CAFFE2_API Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator=nullptr);
+CAFFE2_API Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator=nullptr);
CAFFE2_API Tensor normal(double mean, const Tensor & std, Generator * generator=nullptr);
-CAFFE2_API Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
+CAFFE2_API Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
CAFFE2_API Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator=nullptr);
CAFFE2_API Tensor alias(const Tensor & self);
-CAFFE2_API Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total);
+CAFFE2_API Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total);
CAFFE2_API Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total);
-CAFFE2_API Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
CAFFE2_API Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
-CAFFE2_API Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-CAFFE2_API Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-CAFFE2_API Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
CAFFE2_API Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
-CAFFE2_API Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
CAFFE2_API Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
-CAFFE2_API Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
+CAFFE2_API Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
CAFFE2_API Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
CAFFE2_API std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
CAFFE2_API std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
CAFFE2_API Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
CAFFE2_API Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
-CAFFE2_API Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
+CAFFE2_API Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
CAFFE2_API Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100);
CAFFE2_API std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
CAFFE2_API std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
CAFFE2_API Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
CAFFE2_API Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
-CAFFE2_API Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-CAFFE2_API Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
+CAFFE2_API Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean);
CAFFE2_API Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
CAFFE2_API Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
-CAFFE2_API Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
+CAFFE2_API Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
CAFFE2_API Tensor elu(const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
-CAFFE2_API Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
-CAFFE2_API Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
+CAFFE2_API Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out);
+CAFFE2_API Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out);
CAFFE2_API Tensor & elu_(Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1);
-CAFFE2_API Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim=-1);
+CAFFE2_API Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim=-1);
CAFFE2_API Tensor glu(const Tensor & self, int64_t dim=-1);
CAFFE2_API Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim);
CAFFE2_API Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim);
-CAFFE2_API Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
+CAFFE2_API Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
CAFFE2_API Tensor hardtanh(const Tensor & self, Scalar min_val=-1, Scalar max_val=1);
CAFFE2_API Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
CAFFE2_API Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
CAFFE2_API Tensor & hardtanh_(Tensor & self, Scalar min_val=-1, Scalar max_val=1);
-CAFFE2_API Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope=0.01);
+CAFFE2_API Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope=0.01);
CAFFE2_API Tensor leaky_relu(const Tensor & self, Scalar negative_slope=0.01);
CAFFE2_API Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
CAFFE2_API Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
CAFFE2_API Tensor & leaky_relu_(Tensor & self, Scalar negative_slope=0.01);
-CAFFE2_API Tensor & log_sigmoid_out(Tensor & output, const Tensor & self);
+CAFFE2_API Tensor & log_sigmoid_out(Tensor & out, const Tensor & self);
CAFFE2_API Tensor log_sigmoid(const Tensor & self);
CAFFE2_API std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self);
CAFFE2_API std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self);
CAFFE2_API Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
CAFFE2_API Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
-CAFFE2_API Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
+CAFFE2_API Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
CAFFE2_API Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
CAFFE2_API Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
CAFFE2_API Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
CAFFE2_API Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=false, Generator * generator=nullptr);
-CAFFE2_API Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta=1, Scalar threshold=20);
+CAFFE2_API Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta=1, Scalar threshold=20);
CAFFE2_API Tensor softplus(const Tensor & self, Scalar beta=1, Scalar threshold=20);
-CAFFE2_API Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
-CAFFE2_API Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
-CAFFE2_API Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd=0.5);
+CAFFE2_API Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out);
+CAFFE2_API Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out);
+CAFFE2_API Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd=0.5);
CAFFE2_API Tensor softshrink(const Tensor & self, Scalar lambd=0.5);
CAFFE2_API Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd);
CAFFE2_API Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd);
-CAFFE2_API Tensor & adaptive_avg_pool2d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef output_size);
-CAFFE2_API Tensor & adaptive_avg_pool2d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & adaptive_avg_pool2d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & adaptive_avg_pool2d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor adaptive_avg_pool2d_cpu(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor adaptive_avg_pool2d_cuda(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor adaptive_avg_pool2d_backward_cpu(const Tensor & grad_output, const Tensor & self);
CAFFE2_API Tensor adaptive_avg_pool2d_backward_cuda(const Tensor & grad_output, const Tensor & self);
-CAFFE2_API Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self);
CAFFE2_API Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self);
@@ -1195,11 +1195,11 @@ CAFFE2_API std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & output
CAFFE2_API std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices);
CAFFE2_API Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices);
-CAFFE2_API Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
+CAFFE2_API Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
CAFFE2_API Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
CAFFE2_API Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
CAFFE2_API Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
-CAFFE2_API Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
+CAFFE2_API Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
CAFFE2_API Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
CAFFE2_API Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
CAFFE2_API Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
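Several declarations above default an array-typed parameter to a plain integer or to empty braces, e.g. IntArrayRef stride={}, IntArrayRef padding=0 on avg_pool2d_out. That is well-formed because an ArrayRef-style reference type converts implicitly both from a single element and from an empty list. A simplified stand-in (this IntRef is illustrative, not ATen's ArrayRef) showing both defaults in action:

#include <cstddef>
#include <iostream>

// Tiny non-owning view over a sequence of long, to show why
// 'IntArrayRef padding=0' compiles as a default argument.
struct IntRef {
  const long * data;
  std::size_t len;
  IntRef() : data(nullptr), len(0) {}                 // from {} : empty view
  IntRef(const long & one) : data(&one), len(1) {}    // from 0  : one element
  template <std::size_t N>
  IntRef(const long (&arr)[N]) : data(arr), len(N) {} // from an array
};

// Signature shaped like avg_pool2d_out's trailing array parameters.
void avg_pool_like(IntRef kernel_size, IntRef stride = {}, IntRef padding = 0) {
  std::cout << kernel_size.len << ' ' << stride.len << ' ' << padding.len << '\n';
}

int main() {
  long kernel[] = {2, 2};
  avg_pool_like(kernel);  // prints "2 0 1": empty stride, one-element padding
}

The single-element converting constructor binds a const reference to a temporary, which in ATen is safe for the duration of the call; that is the mechanism that lets a scalar like 0 stand in for a one-element array default.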
@@ -1227,123 +1227,123 @@ CAFFE2_API std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor & ou
CAFFE2_API std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false);
CAFFE2_API Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
CAFFE2_API Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
-CAFFE2_API Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
+CAFFE2_API Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
CAFFE2_API Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size);
CAFFE2_API Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
CAFFE2_API Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
-CAFFE2_API Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
+CAFFE2_API Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
-CAFFE2_API Tensor & reflection_pad1d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & reflection_pad1d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & reflection_pad1d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & reflection_pad1d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad1d_cpu(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad1d_cuda(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & reflection_pad1d_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & reflection_pad1d_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad1d_backward_cpu(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad1d_backward_cuda(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & reflection_pad2d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & reflection_pad2d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & reflection_pad2d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & reflection_pad2d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad2d_cpu(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad2d_cuda(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & reflection_pad2d_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & reflection_pad2d_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad2d_backward_cpu(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor reflection_pad2d_backward_cuda(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad1d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad1d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad1d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad1d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad1d_cpu(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad1d_cuda(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad1d_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad1d_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad1d_backward_cpu(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad1d_backward_cuda(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad2d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad2d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad2d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad2d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad2d_cpu(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad2d_cuda(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad2d_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad2d_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad2d_backward_cpu(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad2d_backward_cuda(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad3d_out_cpu(Tensor & output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & replication_pad3d_out_cuda(Tensor & output, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad3d_out_cpu(Tensor & out, const Tensor & self, IntArrayRef padding);
+CAFFE2_API Tensor & replication_pad3d_out_cuda(Tensor & out, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad3d_cpu(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad3d_cuda(const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad3d_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor & replication_pad3d_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad3d_backward_cpu(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
CAFFE2_API Tensor replication_pad3d_backward_cuda(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
-CAFFE2_API Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+CAFFE2_API Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
CAFFE2_API Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-CAFFE2_API Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+CAFFE2_API Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
CAFFE2_API Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-CAFFE2_API Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+CAFFE2_API Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
CAFFE2_API Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-CAFFE2_API Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners);
+CAFFE2_API Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners);
CAFFE2_API Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
CAFFE2_API Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
-CAFFE2_API Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
CAFFE2_API Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-CAFFE2_API Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
CAFFE2_API Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-CAFFE2_API Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size);
+CAFFE2_API Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size);
CAFFE2_API Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
CAFFE2_API Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
-CAFFE2_API Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
-CAFFE2_API Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output);
-CAFFE2_API Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
-CAFFE2_API Tensor tanh_backward(const Tensor & grad_output, const Tensor & output);
-CAFFE2_API Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
+CAFFE2_API Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out);
+CAFFE2_API Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out);
+CAFFE2_API Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out);
+CAFFE2_API Tensor tanh_backward(const Tensor & grad_output, const Tensor & out);
+CAFFE2_API Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
CAFFE2_API Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask);
-CAFFE2_API Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
+CAFFE2_API Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
CAFFE2_API Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-CAFFE2_API Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
+CAFFE2_API Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
CAFFE2_API Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-CAFFE2_API Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
+CAFFE2_API Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
CAFFE2_API Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
-CAFFE2_API Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
+CAFFE2_API Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
CAFFE2_API Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
CAFFE2_API std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask);
-CAFFE2_API Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
+CAFFE2_API Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
CAFFE2_API Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
-CAFFE2_API Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1);
+CAFFE2_API Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); | |
CAFFE2_API Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); | |
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); | |
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); | |
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones); | |
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask); | |
-CAFFE2_API Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); | |
+CAFFE2_API Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); | |
CAFFE2_API Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); | |
CAFFE2_API std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); | |
CAFFE2_API std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); | |
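As a usage sketch (illustrative only, not part of the generated diff): the renamed out-overloads declared above are called through the public at:: namespace, with the destination tensor passed first, written into, and returned. The tensor shapes below are hypothetical.

#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // N, C, H, W
  at::Tensor weight = at::randn({4, 3, 3, 3});   // out_channels, in_channels, kH, kW
  at::Tensor out    = at::empty({0}, input.options());
  // Writes the convolution result into `out` and returns it; bias, stride and
  // padding fall back to their declared defaults ({}, 1, 0).
  at::thnn_conv2d_out(out, input, weight, /*kernel_size=*/{3, 3});
  return 0;
}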
diff --git a/build/aten/src/ATen/TypeDefault.cpp b/build/aten/src/ATen/TypeDefault.cpp | |
index cc0318497..025e5b664 100644 | |
--- a/build/aten/src/ATen/TypeDefault.cpp | |
+++ b/build/aten/src/ATen/TypeDefault.cpp | |
@@ -4156,7 +4156,7 @@ Tensor TypeDefault::zeros_like(const Tensor & self, const TensorOptions & option | |
const DeviceGuard device_guard(options.device()); | |
return at::native::zeros_like(/* native_actuals */ self, options); | |
} | |
-Tensor TypeDefault::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
+Tensor TypeDefault::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
AT_ERROR("_standard_gamma_grad is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::_standard_gamma(const Tensor & self, Generator * generator) const { | |
@@ -5447,25 +5447,25 @@ Tensor TypeDefault::pow(Scalar self, const Tensor & exponent) const { | |
const OptionalDeviceGuard device_guard(device_of(exponent)); | |
return at::native::pow(/* native_actuals */ self, exponent); | |
} | |
-Tensor & TypeDefault::normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const { | |
- const OptionalDeviceGuard device_guard(device_of(output)); | |
- return at::native::normal_out(/* native_actuals */ output, mean, std, generator); | |
+Tensor & TypeDefault::normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const { | |
+ const OptionalDeviceGuard device_guard(device_of(out)); | |
+ return at::native::normal_out(/* native_actuals */ out, mean, std, generator); | |
} | |
Tensor TypeDefault::normal(const Tensor & mean, double std, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(mean)); | |
return at::native::normal(/* native_actuals */ mean, std, generator); | |
} | |
-Tensor & TypeDefault::normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const { | |
- const OptionalDeviceGuard device_guard(device_of(output)); | |
- return at::native::normal_out(/* native_actuals */ output, mean, std, generator); | |
+Tensor & TypeDefault::normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const { | |
+ const OptionalDeviceGuard device_guard(device_of(out)); | |
+ return at::native::normal_out(/* native_actuals */ out, mean, std, generator); | |
} | |
Tensor TypeDefault::normal(double mean, const Tensor & std, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(std)); | |
return at::native::normal(/* native_actuals */ mean, std, generator); | |
} | |
-Tensor & TypeDefault::normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
- const OptionalDeviceGuard device_guard(device_of(output)); | |
- return at::native::normal_out(/* native_actuals */ output, mean, std, generator); | |
+Tensor & TypeDefault::normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
+ const OptionalDeviceGuard device_guard(device_of(out)); | |
+ return at::native::normal_out(/* native_actuals */ out, mean, std, generator); | |
} | |
Tensor TypeDefault::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(mean)); | |
@@ -5475,17 +5475,17 @@ Tensor TypeDefault::alias(const Tensor & self) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::alias(/* native_actuals */ self); | |
} | |
-Tensor & TypeDefault::_dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
- const OptionalDeviceGuard device_guard(device_of(output)); | |
- return at::native::_dirichlet_grad_out(/* native_actuals */ output, x, alpha, total); | |
+Tensor & TypeDefault::_dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
+ const OptionalDeviceGuard device_guard(device_of(out)); | |
+ return at::native::_dirichlet_grad_out(/* native_actuals */ out, x, alpha, total); | |
} | |
Tensor TypeDefault::_dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
const OptionalDeviceGuard device_guard(device_of(x)); | |
return at::native::_dirichlet_grad(/* native_actuals */ x, alpha, total); | |
} | |
-Tensor & TypeDefault::binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
+Tensor & TypeDefault::binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::binary_cross_entropy_out(/* native_actuals */ output, self, target, weight, reduction); | |
+ return at::native::binary_cross_entropy_out(/* native_actuals */ out, self, target, weight, reduction); | |
} | |
Tensor TypeDefault::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5499,9 +5499,9 @@ Tensor TypeDefault::binary_cross_entropy_backward(const Tensor & grad_output, co | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::binary_cross_entropy_backward(/* native_actuals */ grad_output, self, target, weight, reduction); | |
} | |
-Tensor & TypeDefault::mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & TypeDefault::mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::mse_loss_out(/* native_actuals */ output, self, target, reduction); | |
+ return at::native::mse_loss_out(/* native_actuals */ out, self, target, reduction); | |
} | |
Tensor TypeDefault::mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5515,9 +5515,9 @@ Tensor TypeDefault::mse_loss_backward(const Tensor & grad_output, const Tensor & | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::mse_loss_backward(/* native_actuals */ grad_output, self, target, reduction); | |
} | |
-Tensor & TypeDefault::l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & TypeDefault::l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::l1_loss_out(/* native_actuals */ output, self, target, reduction); | |
+ return at::native::l1_loss_out(/* native_actuals */ out, self, target, reduction); | |
} | |
Tensor TypeDefault::l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5531,9 +5531,9 @@ Tensor TypeDefault::l1_loss_backward(const Tensor & grad_output, const Tensor & | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::l1_loss_backward(/* native_actuals */ grad_output, self, target, reduction); | |
} | |
-Tensor & TypeDefault::multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
+Tensor & TypeDefault::multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::multi_margin_loss_out(/* native_actuals */ output, self, target, p, margin, weight, reduction); | |
+ return at::native::multi_margin_loss_out(/* native_actuals */ out, self, target, p, margin, weight, reduction); | |
} | |
Tensor TypeDefault::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5547,9 +5547,9 @@ Tensor TypeDefault::multi_margin_loss_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::multi_margin_loss_backward(/* native_actuals */ grad_output, self, target, p, margin, weight, reduction); | |
} | |
-Tensor & TypeDefault::multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & TypeDefault::multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::multilabel_margin_loss_out(/* native_actuals */ output, self, target, reduction); | |
+ return at::native::multilabel_margin_loss_out(/* native_actuals */ out, self, target, reduction); | |
} | |
Tensor TypeDefault::multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5571,9 +5571,9 @@ Tensor TypeDefault::multilabel_margin_loss_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::multilabel_margin_loss_backward(/* native_actuals */ grad_output, self, target, reduction, is_target); | |
} | |
-Tensor & TypeDefault::nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+Tensor & TypeDefault::nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::nll_loss_out(/* native_actuals */ output, self, target, weight, reduction, ignore_index); | |
+ return at::native::nll_loss_out(/* native_actuals */ out, self, target, weight, reduction, ignore_index); | |
} | |
Tensor TypeDefault::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5595,9 +5595,9 @@ Tensor TypeDefault::nll_loss_backward(const Tensor & grad_output, const Tensor & | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::nll_loss_backward(/* native_actuals */ grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-Tensor & TypeDefault::nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+Tensor & TypeDefault::nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::nll_loss2d_out(/* native_actuals */ output, self, target, weight, reduction, ignore_index); | |
+ return at::native::nll_loss2d_out(/* native_actuals */ out, self, target, weight, reduction, ignore_index); | |
} | |
Tensor TypeDefault::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5619,9 +5619,9 @@ Tensor TypeDefault::nll_loss2d_backward(const Tensor & grad_output, const Tensor | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::nll_loss2d_backward(/* native_actuals */ grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-Tensor & TypeDefault::smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & TypeDefault::smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::smooth_l1_loss_out(/* native_actuals */ output, self, target, reduction); | |
+ return at::native::smooth_l1_loss_out(/* native_actuals */ out, self, target, reduction); | |
} | |
Tensor TypeDefault::smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5635,9 +5635,9 @@ Tensor TypeDefault::smooth_l1_loss_backward(const Tensor & grad_output, const Te | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::smooth_l1_loss_backward(/* native_actuals */ grad_output, self, target, reduction); | |
} | |
-Tensor & TypeDefault::soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & TypeDefault::soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::soft_margin_loss_out(/* native_actuals */ output, self, target, reduction); | |
+ return at::native::soft_margin_loss_out(/* native_actuals */ out, self, target, reduction); | |
} | |
Tensor TypeDefault::soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5651,29 +5651,29 @@ Tensor TypeDefault::soft_margin_loss_backward(const Tensor & grad_output, const | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::soft_margin_loss_backward(/* native_actuals */ grad_output, self, target, reduction); | |
} | |
-Tensor & TypeDefault::elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
+Tensor & TypeDefault::elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::elu_out(/* native_actuals */ output, self, alpha, scale, input_scale); | |
+ return at::native::elu_out(/* native_actuals */ out, self, alpha, scale, input_scale); | |
} | |
Tensor TypeDefault::elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::elu(/* native_actuals */ self, alpha, scale, input_scale); | |
} | |
-Tensor & TypeDefault::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const { | |
+Tensor & TypeDefault::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_input)); | |
- return at::native::elu_backward_out(/* native_actuals */ grad_input, grad_output, alpha, scale, input_scale, output); | |
+ return at::native::elu_backward_out(/* native_actuals */ grad_input, grad_output, alpha, scale, input_scale, out); | |
} | |
-Tensor TypeDefault::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const { | |
+Tensor TypeDefault::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
- return at::native::elu_backward(/* native_actuals */ grad_output, alpha, scale, input_scale, output); | |
+ return at::native::elu_backward(/* native_actuals */ grad_output, alpha, scale, input_scale, out); | |
} | |
Tensor & TypeDefault::elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::elu_(/* native_actuals */ self, alpha, scale, input_scale); | |
} | |
-Tensor & TypeDefault::glu_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+Tensor & TypeDefault::glu_out(Tensor & out, const Tensor & self, int64_t dim) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::glu_out(/* native_actuals */ output, self, dim); | |
+ return at::native::glu_out(/* native_actuals */ out, self, dim); | |
} | |
Tensor TypeDefault::glu(const Tensor & self, int64_t dim) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5687,9 +5687,9 @@ Tensor TypeDefault::glu_backward(const Tensor & grad_output, const Tensor & self | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::glu_backward(/* native_actuals */ grad_output, self, dim); | |
} | |
-Tensor & TypeDefault::hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
+Tensor & TypeDefault::hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::hardtanh_out(/* native_actuals */ output, self, min_val, max_val); | |
+ return at::native::hardtanh_out(/* native_actuals */ out, self, min_val, max_val); | |
} | |
Tensor TypeDefault::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5707,9 +5707,9 @@ Tensor & TypeDefault::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) c | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::hardtanh_(/* native_actuals */ self, min_val, max_val); | |
} | |
-Tensor & TypeDefault::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const { | |
+Tensor & TypeDefault::leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::leaky_relu_out(/* native_actuals */ output, self, negative_slope); | |
+ return at::native::leaky_relu_out(/* native_actuals */ out, self, negative_slope); | |
} | |
Tensor TypeDefault::leaky_relu(const Tensor & self, Scalar negative_slope) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5727,9 +5727,9 @@ Tensor & TypeDefault::leaky_relu_(Tensor & self, Scalar negative_slope) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::leaky_relu_(/* native_actuals */ self, negative_slope); | |
} | |
-Tensor & TypeDefault::log_sigmoid_out(Tensor & output, const Tensor & self) const { | |
+Tensor & TypeDefault::log_sigmoid_out(Tensor & out, const Tensor & self) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::log_sigmoid_out(/* native_actuals */ output, self); | |
+ return at::native::log_sigmoid_out(/* native_actuals */ out, self); | |
} | |
Tensor TypeDefault::log_sigmoid(const Tensor & self) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5751,9 +5751,9 @@ Tensor TypeDefault::log_sigmoid_backward(const Tensor & grad_output, const Tenso | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::log_sigmoid_backward(/* native_actuals */ grad_output, self, buffer); | |
} | |
-Tensor & TypeDefault::rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
+Tensor & TypeDefault::rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::rrelu_with_noise_out(/* native_actuals */ output, self, noise, lower, upper, training, generator); | |
+ return at::native::rrelu_with_noise_out(/* native_actuals */ out, self, noise, lower, upper, training, generator); | |
} | |
Tensor TypeDefault::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5771,25 +5771,25 @@ Tensor & TypeDefault::rrelu_with_noise_(Tensor & self, const Tensor & noise, Sca | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::rrelu_with_noise_(/* native_actuals */ self, noise, lower, upper, training, generator); | |
} | |
-Tensor & TypeDefault::softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const { | |
+Tensor & TypeDefault::softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::softplus_out(/* native_actuals */ output, self, beta, threshold); | |
+ return at::native::softplus_out(/* native_actuals */ out, self, beta, threshold); | |
} | |
Tensor TypeDefault::softplus(const Tensor & self, Scalar beta, Scalar threshold) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::softplus(/* native_actuals */ self, beta, threshold); | |
} | |
-Tensor & TypeDefault::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
+Tensor & TypeDefault::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::softplus_backward_out(/* native_actuals */ grad_input, grad_output, self, beta, threshold, output); | |
+ return at::native::softplus_backward_out(/* native_actuals */ grad_input, grad_output, self, beta, threshold, out); | |
} | |
-Tensor TypeDefault::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
+Tensor TypeDefault::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::softplus_backward(/* native_actuals */ grad_output, self, beta, threshold, output); | |
+ return at::native::softplus_backward(/* native_actuals */ grad_output, self, beta, threshold, out); | |
} | |
-Tensor & TypeDefault::softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const { | |
+Tensor & TypeDefault::softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::softshrink_out(/* native_actuals */ output, self, lambd); | |
+ return at::native::softshrink_out(/* native_actuals */ out, self, lambd); | |
} | |
Tensor TypeDefault::softshrink(const Tensor & self, Scalar lambd) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5803,7 +5803,7 @@ Tensor TypeDefault::softshrink_backward(const Tensor & grad_output, const Tensor | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::softshrink_backward(/* native_actuals */ grad_output, self, lambd); | |
} | |
-Tensor & TypeDefault::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
AT_ERROR("adaptive_avg_pool2d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const { | |
@@ -5816,9 +5816,9 @@ Tensor TypeDefault::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output | |
Tensor TypeDefault::_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const { | |
AT_ERROR("_adaptive_avg_pool2d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::adaptive_avg_pool3d_out(/* native_actuals */ output, self, output_size); | |
+ return at::native::adaptive_avg_pool3d_out(/* native_actuals */ out, self, output_size); | |
} | |
Tensor TypeDefault::adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5864,9 +5864,9 @@ Tensor TypeDefault::adaptive_max_pool3d_backward(const Tensor & grad_output, con | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::adaptive_max_pool3d_backward(/* native_actuals */ grad_output, self, indices); | |
} | |
-Tensor & TypeDefault::avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
+Tensor & TypeDefault::avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::avg_pool2d_out(/* native_actuals */ output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ return at::native::avg_pool2d_out(/* native_actuals */ out, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
Tensor TypeDefault::avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5880,9 +5880,9 @@ Tensor TypeDefault::avg_pool2d_backward(const Tensor & grad_output, const Tensor | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::avg_pool2d_backward(/* native_actuals */ grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
-Tensor & TypeDefault::avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
+Tensor & TypeDefault::avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::avg_pool3d_out(/* native_actuals */ output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ return at::native::avg_pool3d_out(/* native_actuals */ out, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
Tensor TypeDefault::avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5952,9 +5952,9 @@ Tensor TypeDefault::max_pool3d_with_indices_backward(const Tensor & grad_output, | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::max_pool3d_with_indices_backward(/* native_actuals */ grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); | |
} | |
-Tensor & TypeDefault::max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::max_unpool2d_out(/* native_actuals */ output, self, indices, output_size); | |
+ return at::native::max_unpool2d_out(/* native_actuals */ out, self, indices, output_size); | |
} | |
Tensor TypeDefault::max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5968,9 +5968,9 @@ Tensor TypeDefault::max_unpool2d_backward(const Tensor & grad_output, const Tens | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::max_unpool2d_backward(/* native_actuals */ grad_output, self, indices, output_size); | |
} | |
-Tensor & TypeDefault::max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
+Tensor & TypeDefault::max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::max_unpool3d_out(/* native_actuals */ output, self, indices, output_size, stride, padding); | |
+ return at::native::max_unpool3d_out(/* native_actuals */ out, self, indices, output_size, stride, padding); | |
} | |
Tensor TypeDefault::max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -5984,7 +5984,7 @@ Tensor TypeDefault::max_unpool3d_backward(const Tensor & grad_output, const Tens | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::max_unpool3d_backward(/* native_actuals */ grad_output, self, indices, output_size, stride, padding); | |
} | |
-Tensor & TypeDefault::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & TypeDefault::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("reflection_pad1d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::reflection_pad1d(const Tensor & self, IntArrayRef padding) const { | |
@@ -5996,7 +5996,7 @@ Tensor & TypeDefault::reflection_pad1d_backward_out(Tensor & grad_input, const T | |
Tensor TypeDefault::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("reflection_pad1d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & TypeDefault::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("reflection_pad2d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
@@ -6008,7 +6008,7 @@ Tensor & TypeDefault::reflection_pad2d_backward_out(Tensor & grad_input, const T | |
Tensor TypeDefault::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("reflection_pad2d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & TypeDefault::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad1d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::replication_pad1d(const Tensor & self, IntArrayRef padding) const { | |
@@ -6020,7 +6020,7 @@ Tensor & TypeDefault::replication_pad1d_backward_out(Tensor & grad_input, const | |
Tensor TypeDefault::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad1d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & TypeDefault::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad2d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::replication_pad2d(const Tensor & self, IntArrayRef padding) const { | |
@@ -6032,7 +6032,7 @@ Tensor & TypeDefault::replication_pad2d_backward_out(Tensor & grad_input, const | |
Tensor TypeDefault::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad2d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & TypeDefault::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad3d_out is not implemented for type ", toString()); | |
} | |
Tensor TypeDefault::replication_pad3d(const Tensor & self, IntArrayRef padding) const { | |
@@ -6044,9 +6044,9 @@ Tensor & TypeDefault::replication_pad3d_backward_out(Tensor & grad_input, const | |
Tensor TypeDefault::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const { | |
AT_ERROR("replication_pad3d_backward is not implemented for type ", toString()); | |
} | |
-Tensor & TypeDefault::upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+Tensor & TypeDefault::upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_linear1d_out(/* native_actuals */ output, self, output_size, align_corners); | |
+ return at::native::upsample_linear1d_out(/* native_actuals */ out, self, output_size, align_corners); | |
} | |
Tensor TypeDefault::upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6060,9 +6060,9 @@ Tensor TypeDefault::upsample_linear1d_backward(const Tensor & grad_output, IntAr | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_linear1d_backward(/* native_actuals */ grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & TypeDefault::upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+Tensor & TypeDefault::upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_bilinear2d_out(/* native_actuals */ output, self, output_size, align_corners); | |
+ return at::native::upsample_bilinear2d_out(/* native_actuals */ out, self, output_size, align_corners); | |
} | |
Tensor TypeDefault::upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6076,9 +6076,9 @@ Tensor TypeDefault::upsample_bilinear2d_backward(const Tensor & grad_output, Int | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_bilinear2d_backward(/* native_actuals */ grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & TypeDefault::upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+Tensor & TypeDefault::upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_bicubic2d_out(/* native_actuals */ output, self, output_size, align_corners); | |
+ return at::native::upsample_bicubic2d_out(/* native_actuals */ out, self, output_size, align_corners); | |
} | |
Tensor TypeDefault::upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6092,9 +6092,9 @@ Tensor TypeDefault::upsample_bicubic2d_backward(const Tensor & grad_output, IntA | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_bicubic2d_backward(/* native_actuals */ grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & TypeDefault::upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
+Tensor & TypeDefault::upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_trilinear3d_out(/* native_actuals */ output, self, output_size, align_corners); | |
+ return at::native::upsample_trilinear3d_out(/* native_actuals */ out, self, output_size, align_corners); | |
} | |
Tensor TypeDefault::upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6108,9 +6108,9 @@ Tensor TypeDefault::upsample_trilinear3d_backward(const Tensor & grad_output, In | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_trilinear3d_backward(/* native_actuals */ grad_output, output_size, input_size, align_corners); | |
} | |
-Tensor & TypeDefault::upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_nearest1d_out(/* native_actuals */ output, self, output_size); | |
+ return at::native::upsample_nearest1d_out(/* native_actuals */ out, self, output_size); | |
} | |
Tensor TypeDefault::upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6124,9 +6124,9 @@ Tensor TypeDefault::upsample_nearest1d_backward(const Tensor & grad_output, IntA | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_nearest1d_backward(/* native_actuals */ grad_output, output_size, input_size); | |
} | |
-Tensor & TypeDefault::upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_nearest2d_out(/* native_actuals */ output, self, output_size); | |
+ return at::native::upsample_nearest2d_out(/* native_actuals */ out, self, output_size); | |
} | |
Tensor TypeDefault::upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6140,9 +6140,9 @@ Tensor TypeDefault::upsample_nearest2d_backward(const Tensor & grad_output, IntA | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_nearest2d_backward(/* native_actuals */ grad_output, output_size, input_size); | |
} | |
-Tensor & TypeDefault::upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & TypeDefault::upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::upsample_nearest3d_out(/* native_actuals */ output, self, output_size); | |
+ return at::native::upsample_nearest3d_out(/* native_actuals */ out, self, output_size); | |
} | |
Tensor TypeDefault::upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6156,25 +6156,25 @@ Tensor TypeDefault::upsample_nearest3d_backward(const Tensor & grad_output, IntA | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
return at::native::upsample_nearest3d_backward(/* native_actuals */ grad_output, output_size, input_size); | |
} | |
-Tensor & TypeDefault::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const { | |
+Tensor & TypeDefault::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_input)); | |
- return at::native::sigmoid_backward_out(/* native_actuals */ grad_input, grad_output, output); | |
+ return at::native::sigmoid_backward_out(/* native_actuals */ grad_input, grad_output, out); | |
} | |
-Tensor TypeDefault::sigmoid_backward(const Tensor & grad_output, const Tensor & output) const { | |
+Tensor TypeDefault::sigmoid_backward(const Tensor & grad_output, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
- return at::native::sigmoid_backward(/* native_actuals */ grad_output, output); | |
+ return at::native::sigmoid_backward(/* native_actuals */ grad_output, out); | |
} | |
-Tensor & TypeDefault::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const { | |
+Tensor & TypeDefault::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_input)); | |
- return at::native::tanh_backward_out(/* native_actuals */ grad_input, grad_output, output); | |
+ return at::native::tanh_backward_out(/* native_actuals */ grad_input, grad_output, out); | |
} | |
-Tensor TypeDefault::tanh_backward(const Tensor & grad_output, const Tensor & output) const { | |
+Tensor TypeDefault::tanh_backward(const Tensor & grad_output, const Tensor & out) const { | |
const OptionalDeviceGuard device_guard(device_of(grad_output)); | |
- return at::native::tanh_backward(/* native_actuals */ grad_output, output); | |
+ return at::native::tanh_backward(/* native_actuals */ grad_output, out); | |
} | |
-Tensor & TypeDefault::thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
+Tensor & TypeDefault::thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::thnn_conv_transpose2d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
+ return at::native::thnn_conv_transpose2d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
} | |
Tensor TypeDefault::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6196,9 +6196,9 @@ std::tuple<Tensor,Tensor,Tensor> TypeDefault::thnn_conv_transpose2d_backward(con | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::thnn_conv_transpose2d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask); | |
} | |
-Tensor & TypeDefault::thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
+Tensor & TypeDefault::thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::thnn_conv_transpose3d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
+ return at::native::thnn_conv_transpose3d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
} | |
Tensor TypeDefault::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6220,9 +6220,9 @@ std::tuple<Tensor,Tensor,Tensor> TypeDefault::thnn_conv_transpose3d_backward(con | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::thnn_conv_transpose3d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask); | |
} | |
-Tensor & TypeDefault::thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const { | |
+Tensor & TypeDefault::thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
- return at::native::thnn_conv2d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding); | |
+ return at::native::thnn_conv2d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding); | |
} | |
Tensor TypeDefault::thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const { | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
@@ -6244,17 +6244,17 @@ std::tuple<Tensor,Tensor,Tensor> TypeDefault::thnn_conv2d_backward(const Tensor | |
const OptionalDeviceGuard device_guard(device_of(self)); | |
return at::native::thnn_conv2d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & TypeDefault::thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+Tensor & TypeDefault::thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::thnn_conv_depthwise2d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::native::thnn_conv_depthwise2d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor TypeDefault::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::thnn_conv_depthwise2d(/* native_actuals */ self, weight, kernel_size, bias, stride, padding, dilation);
}
-Tensor & TypeDefault::thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+Tensor & TypeDefault::thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::thnn_conv_depthwise2d_forward_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::native::thnn_conv_depthwise2d_forward_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor TypeDefault::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6268,9 +6268,9 @@ std::tuple<Tensor,Tensor> TypeDefault::thnn_conv_depthwise2d_backward(const Tens
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::thnn_conv_depthwise2d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
}
-Tensor & TypeDefault::thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
+Tensor & TypeDefault::thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::thnn_conv3d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding);
+ return at::native::thnn_conv3d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding);
}
Tensor TypeDefault::thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6292,9 +6292,9 @@ std::tuple<Tensor,Tensor,Tensor> TypeDefault::thnn_conv3d_backward(const Tensor
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::thnn_conv3d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & TypeDefault::thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+Tensor & TypeDefault::thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::thnn_conv_dilated2d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::native::thnn_conv_dilated2d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor TypeDefault::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
@@ -6316,9 +6316,9 @@ std::tuple<Tensor,Tensor,Tensor> TypeDefault::thnn_conv_dilated2d_backward(const
const OptionalDeviceGuard device_guard(device_of(self));
return at::native::thnn_conv_dilated2d_backward(/* native_actuals */ grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
}
-Tensor & TypeDefault::thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+Tensor & TypeDefault::thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
- return at::native::thnn_conv_dilated3d_out(/* native_actuals */ output, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::native::thnn_conv_dilated3d_out(/* native_actuals */ out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor TypeDefault::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
const OptionalDeviceGuard device_guard(device_of(self));
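All of the TypeDefault.cpp wrappers above share one shape: construct an OptionalDeviceGuard from `self`, then forward every actual, destination tensor first, to the matching at::native kernel. Renaming `output` to `out` therefore only touches a parameter name, never the call semantics. A minimal standalone sketch of that wrapper pattern, assuming a hypothetical op (`my_op_out` and `my_op_out_native` are illustrative names, not functions from this diff):

#include <ATen/ATen.h>
#include <ATen/DeviceGuard.h>

// Stand-in for an at::native::*_out kernel: writes the result into `out`.
static at::Tensor& my_op_out_native(at::Tensor& out, const at::Tensor& self) {
  out.resize_(self.sizes());
  return out.copy_(self);
}

// Stand-in for a generated wrapper like the ones above: pin the device
// from `self`, then forward the actuals, `out` first, to the kernel.
static at::Tensor& my_op_out(at::Tensor& out, const at::Tensor& self) {
  const c10::OptionalDeviceGuard device_guard(at::device_of(self));
  return my_op_out_native(out, self);
}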
diff --git a/build/aten/src/ATen/TypeDefault.h b/build/aten/src/ATen/TypeDefault.h
index 5448d0b4a..6aa40a2b9 100644
--- a/build/aten/src/ATen/TypeDefault.h
+++ b/build/aten/src/ATen/TypeDefault.h
@@ -1188,7 +1188,7 @@ struct CAFFE2_API TypeDefault : public TypeExtendedInterface {
Tensor & zeros_out(Tensor & out, IntArrayRef size) const override;
Tensor zeros_like(const Tensor & self) const override;
Tensor zeros_like(const Tensor & self, const TensorOptions & options) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
Tensor poisson(const Tensor & self, Generator * generator) const override;
Tensor native_norm(const Tensor & self, Scalar p) const override;
@@ -1523,100 +1523,100 @@ struct CAFFE2_API TypeDefault : public TypeExtendedInterface {
Tensor pow(const Tensor & self, const Tensor & exponent) const override;
Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent) const override;
Tensor pow(Scalar self, const Tensor & exponent) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const override;
Tensor normal(const Tensor & mean, double std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const override;
Tensor normal(double mean, const Tensor & std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor alias(const Tensor & self) const override;
- Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
+ Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
- Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
+ Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
- Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
+ Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
- Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) const override;
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
- Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
- Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
- Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
+ Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
- Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
- Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
+ Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
+ Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
- Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) const override;
+ Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) const override;
Tensor glu(const Tensor & self, int64_t dim) const override;
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
- Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
+ Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const override;
- Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const override;
+ Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const override;
Tensor leaky_relu(const Tensor & self, Scalar negative_slope) const override;
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) const override;
- Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) const override;
+ Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) const override;
Tensor log_sigmoid(const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const override;
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) const override;
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override;
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override;
- Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
+ Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
- Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const override;
+ Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const override;
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) const override;
- Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const override;
+ Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const override;
Tensor softshrink(const Tensor & self, Scalar lambd) const override;
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override;
- Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const override;
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const override;
@@ -1628,11 +1628,11 @@ struct CAFFE2_API TypeDefault : public TypeExtendedInterface {
std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override;
Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override;
- Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
- Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
@@ -1652,103 +1652,103 @@ struct CAFFE2_API TypeDefault : public TypeExtendedInterface {
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) const override;
Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override;
Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override;
- Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
+ Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
- Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
- Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
- Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
+ Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override;
+ Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) const override;
+ Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override;
+ Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) const override;
+ Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override;
- Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
- Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
- Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const override;
- Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
- Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override;
- Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
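The TypeExtendedInterface.h hunks that follow mirror the TypeDefault.h ones above: the pure-virtual declarations and their overrides are renamed in lockstep so the generated headers stay consistent. Nothing at the language level forces this, because C++ matches an override by its type signature rather than by parameter names; a self-contained sketch of that rule (the types and names here are illustrative, not ATen's):

struct Interface {
  // Declared with one parameter name...
  virtual int& fill_out(int& output, int value) const = 0;
  virtual ~Interface() = default;
};

struct DefaultImpl : Interface {
  // ...overridden with another. Only the types must agree, which is why
  // renaming `output` to `out` across these headers changes no behavior.
  int& fill_out(int& out, int value) const override {
    out = value;
    return out;
  }
};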
diff --git a/build/aten/src/ATen/TypeExtendedInterface.h b/build/aten/src/ATen/TypeExtendedInterface.h
index 5b2f1c0fe..d6fb3e61a 100644
--- a/build/aten/src/ATen/TypeExtendedInterface.h
+++ b/build/aten/src/ATen/TypeExtendedInterface.h
@@ -938,7 +938,7 @@ struct CAFFE2_API TypeExtendedInterface : public Type {
virtual Tensor & zeros_out(Tensor & out, IntArrayRef size) const = 0;
virtual Tensor zeros_like(const Tensor & self) const = 0;
virtual Tensor zeros_like(const Tensor & self, const TensorOptions & options) const = 0;
- virtual Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const = 0;
+ virtual Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const = 0;
virtual Tensor _standard_gamma(const Tensor & self, Generator * generator) const = 0;
virtual Tensor poisson(const Tensor & self, Generator * generator) const = 0;
virtual Tensor native_norm(const Tensor & self, Scalar p) const = 0;
@@ -1072,99 +1072,99 @@ struct CAFFE2_API TypeExtendedInterface : public Type {
virtual Tensor & renorm_out(Tensor & out, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const = 0;
virtual Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & exponent) const = 0;
virtual Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent) const = 0;
- virtual Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const = 0;
+ virtual Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const = 0;
virtual Tensor normal(const Tensor & mean, double std, Generator * generator) const = 0;
- virtual Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const = 0;
+ virtual Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const = 0;
virtual Tensor normal(double mean, const Tensor & std, Generator * generator) const = 0;
- virtual Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const = 0;
+ virtual Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const = 0;
virtual Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) const = 0;
- virtual Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const = 0;
+ virtual Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const = 0;
virtual Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const = 0;
- virtual Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const = 0;
+ virtual Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const = 0;
- virtual Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
+ virtual Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
- virtual Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
+ virtual Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
- virtual Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const = 0;
+ virtual Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const = 0;
virtual Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const = 0;
- virtual Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
+ virtual Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const = 0;
virtual Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const = 0;
- virtual Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
+ virtual Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const = 0;
virtual Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const = 0;
- virtual Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
+ virtual Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const = 0;
virtual Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const = 0;
virtual Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const = 0;
- virtual Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
+ virtual Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
- virtual Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
+ virtual Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
virtual Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const = 0;
- virtual Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const = 0;
+ virtual Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const = 0;
virtual Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const = 0;
- virtual Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const = 0;
- virtual Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const = 0;
+ virtual Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const = 0;
+ virtual Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const = 0;
virtual Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const = 0;
- virtual Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) const = 0;
+ virtual Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) const = 0;
virtual Tensor glu(const Tensor & self, int64_t dim) const = 0;
virtual Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const = 0;
virtual Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const = 0;
- virtual Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const = 0;
+ virtual Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const = 0;
virtual Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const = 0;
virtual Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const = 0;
virtual Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const = 0;
virtual Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const = 0;
- virtual Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const = 0;
+ virtual Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const = 0;
virtual Tensor leaky_relu(const Tensor & self, Scalar negative_slope) const = 0;
virtual Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const = 0;
virtual Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const = 0;
virtual Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) const = 0;
- virtual Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) const = 0;
+ virtual Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) const = 0; | |
virtual Tensor log_sigmoid(const Tensor & self) const = 0; | |
virtual std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const = 0; | |
virtual std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) const = 0; | |
virtual Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const = 0; | |
virtual Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const = 0; | |
- virtual Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const = 0; | |
+ virtual Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const = 0; | |
virtual Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const = 0; | |
virtual Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const = 0; | |
virtual Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const = 0; | |
virtual Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const = 0; | |
- virtual Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const = 0; | |
+ virtual Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const = 0; | |
virtual Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) const = 0; | |
- virtual Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const = 0; | |
- virtual Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const = 0; | |
- virtual Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const = 0; | |
+ virtual Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const = 0; | |
+ virtual Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const = 0; | |
+ virtual Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const = 0; | |
virtual Tensor softshrink(const Tensor & self, Scalar lambd) const = 0; | |
virtual Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const = 0; | |
virtual Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const = 0; | |
- virtual Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const = 0; | |
- virtual Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const = 0; | |
virtual Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const = 0; | |
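
Every `*_out` overload declared above takes the destination tensor first, fills it in place, and returns a reference to that same tensor; the rename from `output` to `out` changes only the parameter's name, not this contract. A minimal calling sketch, assuming the generated `at::` wrappers of the same vintage (the signature of `at::adaptive_avg_pool2d_out` is taken from the declaration above; the shapes are illustrative):

    #include <ATen/ATen.h>
    #include <cassert>

    int main() {
      at::Tensor self = at::randn({2, 4, 4});  // (C, H, W) input
      at::Tensor out = at::empty({0});         // destination; the kernel resizes it
      // The first ("out") argument is written in place and returned by reference.
      at::Tensor& result = at::adaptive_avg_pool2d_out(out, self, {2, 2});
      assert(result.is_same(out));             // the returned reference aliases the destination
      return 0;
    }
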
@@ -1176,11 +1176,11 @@ struct CAFFE2_API TypeExtendedInterface : public Type { | |
virtual std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const = 0; | |
virtual Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const = 0; | |
- virtual Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
+ virtual Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
- virtual Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
+ virtual Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
virtual Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const = 0; | |
@@ -1200,103 +1200,103 @@ struct CAFFE2_API TypeExtendedInterface : public Type { | |
virtual std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) const = 0; | |
virtual Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const = 0; | |
virtual Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const = 0; | |
- virtual Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const = 0; | |
virtual Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const = 0; | |
virtual Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const = 0; | |
virtual Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const = 0; | |
- virtual Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const = 0; | |
+ virtual Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const = 0; | |
- virtual Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const = 0; | |
+ virtual Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
- virtual Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const = 0; | |
+ virtual Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
- virtual Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const = 0; | |
+ virtual Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
- virtual Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const = 0; | |
+ virtual Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
- virtual Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const = 0; | |
+ virtual Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
virtual Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const = 0; | |
- virtual Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
+ virtual Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
virtual Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
- virtual Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
+ virtual Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
virtual Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
- virtual Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
+ virtual Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
virtual Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
- virtual Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
+ virtual Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const = 0; | |
virtual Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
virtual Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const = 0; | |
- virtual Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
virtual Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
- virtual Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
virtual Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
- virtual Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const = 0; | |
+ virtual Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const = 0; | |
virtual Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
virtual Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const = 0; | |
- virtual Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const = 0; | |
- virtual Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) const = 0; | |
- virtual Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const = 0; | |
- virtual Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) const = 0; | |
- virtual Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const = 0; | |
+ virtual Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) const = 0; | |
+ virtual Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const = 0; | |
+ virtual Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) const = 0; | |
+ virtual Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const = 0; | |
- virtual Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const = 0; | |
- virtual Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
+ virtual Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const = 0; | |
- virtual Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
- virtual Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const = 0; | |
- virtual Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
+ virtual Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const = 0; | |
- virtual Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const = 0; | |
- virtual Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
+ virtual Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
virtual std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const = 0; | |
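
Note that the rename is purely lexical: it also reaches `const Tensor &` parameters that are inputs rather than destinations, such as the saved forward result in `elu_backward`, `sigmoid_backward`, and `tanh_backward` above, which merely happened to be spelled `output`.

In the XLAType changes that follow, the rename goes beyond C++ parameter names: XLAType resolves every call through a schema string, so the `Tensor output` spelling inside those strings becomes `Tensor out` as well. Taking `nll_loss_out` from a later hunk as the concrete case:

    // before: resolved via
    "nll_loss_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor"
    // after:
    "nll_loss_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor"

Since `XLATypeDispatch::get_function` receives that exact text, a backend that registered its kernels under the old spelling would presumably need to re-register under the new one.
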
diff --git a/build/aten/src/ATen/XLAType.cpp b/build/aten/src/ATen/XLAType.cpp | |
index c31ce4f12..a0dfb1720 100644 | |
--- a/build/aten/src/ATen/XLAType.cpp | |
+++ b/build/aten/src/ATen/XLAType.cpp | |
@@ -3238,8 +3238,8 @@ Tensor XLAType::zeros_like(const Tensor & self) const { | |
Tensor XLAType::zeros_like(const Tensor & self, const TensorOptions & options) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const TensorOptions &)>("zeros_like(Tensor self, TensorOptions options) -> Tensor")(self, options); | |
} | |
-Tensor XLAType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
- return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_standard_gamma_grad(Tensor self, Tensor output) -> Tensor")(self, output); | |
+Tensor XLAType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const { | |
+ return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_standard_gamma_grad(Tensor self, Tensor out) -> Tensor")(self, out); | |
} | |
Tensor XLAType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Generator *)>("_standard_gamma(Tensor self, Generator * generator) -> Tensor")(self, generator); | |
@@ -4243,20 +4243,20 @@ Tensor & XLAType::pow_out(Tensor & out, Scalar self, const Tensor & exponent) co | |
Tensor XLAType::pow(Scalar self, const Tensor & exponent) const { | |
return XLATypeDispatch::get_function<Tensor (*)(Scalar, const Tensor &)>("pow(Scalar self, Tensor exponent) -> Tensor")(self, exponent); | |
} | |
-Tensor & XLAType::normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, double, Generator *)>("normal_out(Tensor output, Tensor mean, double std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & XLAType::normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, double, Generator *)>("normal_out(Tensor out, Tensor mean, double std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor XLAType::normal(const Tensor & mean, double std, Generator * generator) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, double, Generator *)>("normal(Tensor mean, double std, Generator * generator) -> Tensor")(mean, std, generator); | |
} | |
-Tensor & XLAType::normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, double, const Tensor &, Generator *)>("normal_out(Tensor output, double mean, Tensor std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & XLAType::normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, double, const Tensor &, Generator *)>("normal_out(Tensor out, double mean, Tensor std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor XLAType::normal(double mean, const Tensor & std, Generator * generator) const { | |
return XLATypeDispatch::get_function<Tensor (*)(double, const Tensor &, Generator *)>("normal(double mean, Tensor std, Generator * generator) -> Tensor")(mean, std, generator); | |
} | |
-Tensor & XLAType::normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Generator *)>("normal_out(Tensor output, Tensor mean, Tensor std, Generator * generator) -> Tensor")(output, mean, std, generator); | |
+Tensor & XLAType::normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Generator *)>("normal_out(Tensor out, Tensor mean, Tensor std, Generator * generator) -> Tensor")(out, mean, std, generator); | |
} | |
Tensor XLAType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Generator *)>("normal(Tensor mean, Tensor std, Generator * generator) -> Tensor")(mean, std, generator); | |
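
The three `normal_out` overloads in the hunk above differ only in which of `mean` and `std` is a Tensor: the Tensor argument is read elementwise, while the scalar one is shared across all elements. A minimal sketch, assuming `at::` wrappers with the same signatures as these Type methods (passing `nullptr` for the generator selects the default one):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor out = at::empty({3});
      at::normal_out(out, at::zeros({3}), /*std=*/1.0, /*generator=*/nullptr);   // Tensor mean, double std
      at::normal_out(out, /*mean=*/0.0, at::ones({3}), /*generator=*/nullptr);   // double mean, Tensor std
      at::normal_out(out, at::zeros({3}), at::ones({3}), /*generator=*/nullptr); // both Tensors
      return 0;
    }
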
@@ -4264,14 +4264,14 @@ Tensor XLAType::normal(const Tensor & mean, const Tensor & std, Generator * gene | |
Tensor XLAType::alias(const Tensor & self) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &)>("alias(Tensor self) -> Tensor")(self); | |
} | |
-Tensor & XLAType::_dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad_out(Tensor output, Tensor x, Tensor alpha, Tensor total) -> Tensor")(output, x, alpha, total); | |
+Tensor & XLAType::_dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad_out(Tensor out, Tensor x, Tensor alpha, Tensor total) -> Tensor")(out, x, alpha, total); | |
} | |
Tensor XLAType::_dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor")(x, alpha, total); | |
} | |
-Tensor & XLAType::binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(output, self, target, weight, reduction); | |
+Tensor & XLAType::binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(out, self, target, weight, reduction); | |
} | |
Tensor XLAType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy(Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(self, target, weight, reduction); | |
@@ -4282,8 +4282,8 @@ Tensor & XLAType::binary_cross_entropy_backward_out(Tensor & grad_input, const T | |
Tensor XLAType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t)>("binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction) -> Tensor")(grad_output, self, target, weight, reduction); | |
} | |
-Tensor & XLAType::mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction); | |
+Tensor & XLAType::mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction); | |
} | |
Tensor XLAType::mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("mse_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction); | |
@@ -4294,8 +4294,8 @@ Tensor & XLAType::mse_loss_backward_out(Tensor & grad_input, const Tensor & grad | |
Tensor XLAType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction); | |
} | |
-Tensor & XLAType::l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction); | |
+Tensor & XLAType::l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction); | |
} | |
Tensor XLAType::l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("l1_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction); | |
@@ -4306,8 +4306,8 @@ Tensor & XLAType::l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_ | |
Tensor XLAType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction); | |
} | |
-Tensor & XLAType::multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_out(Tensor output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(output, self, target, p, margin, weight, reduction); | |
+Tensor & XLAType::multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_out(Tensor out, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(out, self, target, p, margin, weight, reduction); | |
} | |
Tensor XLAType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss(Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(self, target, p, margin, weight, reduction); | |
@@ -4318,8 +4318,8 @@ Tensor & XLAType::multi_margin_loss_backward_out(Tensor & grad_input, const Tens | |
Tensor XLAType::multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t)>("multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int64_t reduction) -> Tensor")(grad_output, self, target, p, margin, weight, reduction); | |
} | |
-Tensor & XLAType::multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction); | |
+Tensor & XLAType::multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction); | |
} | |
Tensor XLAType::multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("multilabel_margin_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction); | |
@@ -4336,8 +4336,8 @@ Tensor & XLAType::multilabel_margin_loss_backward_out(Tensor & grad_input, const | |
Tensor XLAType::multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, const Tensor &)>("multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction, Tensor is_target) -> Tensor")(grad_output, self, target, reduction, is_target); | |
} | |
-Tensor & XLAType::nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(output, self, target, weight, reduction, ignore_index); | |
+Tensor & XLAType::nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(out, self, target, weight, reduction, ignore_index); | |
} | |
Tensor XLAType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss(Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(self, target, weight, reduction, ignore_index); | |
@@ -4354,8 +4354,8 @@ Tensor & XLAType::nll_loss_backward_out(Tensor & grad_input, const Tensor & grad | |
Tensor XLAType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &)>("nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index, Tensor total_weight) -> Tensor")(grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-Tensor & XLAType::nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d_out(Tensor output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(output, self, target, weight, reduction, ignore_index); | |
+Tensor & XLAType::nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d_out(Tensor out, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(out, self, target, weight, reduction, ignore_index); | |
} | |
Tensor XLAType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t)>("nll_loss2d(Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index) -> Tensor")(self, target, weight, reduction, ignore_index); | |
@@ -4372,8 +4372,8 @@ Tensor & XLAType::nll_loss2d_backward_out(Tensor & grad_input, const Tensor & gr | |
Tensor XLAType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &)>("nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int64_t reduction, int64_t ignore_index, Tensor total_weight) -> Tensor")(grad_output, self, target, weight, reduction, ignore_index, total_weight); | |
} | |
-Tensor & XLAType::smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction); | |
+Tensor & XLAType::smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction); | |
} | |
Tensor XLAType::smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4384,8 +4384,8 @@ Tensor & XLAType::smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor
Tensor XLAType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & XLAType::soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_out(Tensor output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(output, self, target, reduction);
+Tensor & XLAType::soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_out(Tensor out, Tensor self, Tensor target, int64_t reduction) -> Tensor")(out, self, target, reduction);
}
Tensor XLAType::soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("soft_margin_loss(Tensor self, Tensor target, int64_t reduction) -> Tensor")(self, target, reduction);
@@ -4396,23 +4396,23 @@ Tensor & XLAType::soft_margin_loss_backward_out(Tensor & grad_input, const Tenso
Tensor XLAType::soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, int64_t)>("soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int64_t reduction) -> Tensor")(grad_output, self, target, reduction);
}
-Tensor & XLAType::elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar)>("elu_out(Tensor output, Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(output, self, alpha, scale, input_scale);
+Tensor & XLAType::elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar)>("elu_out(Tensor out, Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(out, self, alpha, scale, input_scale);
}
Tensor XLAType::elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar)>("elu(Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(self, alpha, scale, input_scale);
}
-Tensor & XLAType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward_out(Tensor grad_input, Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor")(grad_input, grad_output, alpha, scale, input_scale, output);
+Tensor & XLAType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward_out(Tensor grad_input, Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor out) -> Tensor")(grad_input, grad_output, alpha, scale, input_scale, out);
}
-Tensor XLAType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor")(grad_output, alpha, scale, input_scale, output);
+Tensor XLAType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar, Scalar, const Tensor &)>("elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor out) -> Tensor")(grad_output, alpha, scale, input_scale, out);
}
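// Note on the elu hunk above: unlike the out-variants, elu_backward's renamed
// `out` argument is the saved *forward* activation, not a destination buffer;
// ELU's gradient is recoverable from its output alone. A sketch of the identity
// (assuming alpha > 0, so out > 0 exactly when the input was positive):
//   d(out)/dx = scale                                for x > 0
//   d(out)/dx = input_scale * (out + scale * alpha)  for x <= 0
// which is why the schema threads the forward result into the backward kernel.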
Tensor & XLAType::elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const {
return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar, Scalar, Scalar)>("elu_(Tensor self, Scalar alpha, Scalar scale, Scalar input_scale) -> Tensor")(self, alpha, scale, input_scale);
}
-Tensor & XLAType::glu_out(Tensor & output, const Tensor & self, int64_t dim) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, int64_t)>("glu_out(Tensor output, Tensor self, int64_t dim) -> Tensor")(output, self, dim);
+Tensor & XLAType::glu_out(Tensor & out, const Tensor & self, int64_t dim) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, int64_t)>("glu_out(Tensor out, Tensor self, int64_t dim) -> Tensor")(out, self, dim);
}
Tensor XLAType::glu(const Tensor & self, int64_t dim) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, int64_t)>("glu(Tensor self, int64_t dim) -> Tensor")(self, dim);
@@ -4423,8 +4423,8 @@ Tensor & XLAType::glu_backward_out(Tensor & grad_input, const Tensor & grad_outp
Tensor XLAType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, int64_t)>("glu_backward(Tensor grad_output, Tensor self, int64_t dim) -> Tensor")(grad_output, self, dim);
}
-Tensor & XLAType::hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("hardtanh_out(Tensor output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(output, self, min_val, max_val);
+Tensor & XLAType::hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("hardtanh_out(Tensor out, Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(out, self, min_val, max_val);
}
Tensor XLAType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar)>("hardtanh(Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(self, min_val, max_val);
@@ -4438,8 +4438,8 @@ Tensor XLAType::hardtanh_backward(const Tensor & grad_output, const Tensor & sel
Tensor & XLAType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar, Scalar)>("hardtanh_(Tensor self, Scalar min_val, Scalar max_val) -> Tensor")(self, min_val, max_val);
}
-Tensor & XLAType::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("leaky_relu_out(Tensor output, Tensor self, Scalar negative_slope) -> Tensor")(output, self, negative_slope);
+Tensor & XLAType::leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("leaky_relu_out(Tensor out, Tensor self, Scalar negative_slope) -> Tensor")(out, self, negative_slope);
}
Tensor XLAType::leaky_relu(const Tensor & self, Scalar negative_slope) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar)>("leaky_relu(Tensor self, Scalar negative_slope) -> Tensor")(self, negative_slope);
@@ -4453,8 +4453,8 @@ Tensor XLAType::leaky_relu_backward(const Tensor & grad_output, const Tensor & s
Tensor & XLAType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, Scalar)>("leaky_relu_(Tensor self, Scalar negative_slope) -> Tensor")(self, negative_slope);
}
-Tensor & XLAType::log_sigmoid_out(Tensor & output, const Tensor & self) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &)>("log_sigmoid_out(Tensor output, Tensor self) -> Tensor")(output, self);
+Tensor & XLAType::log_sigmoid_out(Tensor & out, const Tensor & self) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &)>("log_sigmoid_out(Tensor out, Tensor self) -> Tensor")(out, self);
}
Tensor XLAType::log_sigmoid(const Tensor & self) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &)>("log_sigmoid(Tensor self) -> Tensor")(self);
@@ -4471,8 +4471,8 @@ Tensor & XLAType::log_sigmoid_backward_out(Tensor & grad_input, const Tensor & g
Tensor XLAType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor")(grad_output, self, buffer);
}
-Tensor & XLAType::rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_out(Tensor output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(output, self, noise, lower, upper, training, generator);
+Tensor & XLAType::rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_out(Tensor out, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(out, self, noise, lower, upper, training, generator);
}
Tensor XLAType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(self, noise, lower, upper, training, generator);
@@ -4486,20 +4486,20 @@ Tensor XLAType::rrelu_with_noise_backward(const Tensor & grad_output, const Tens
Tensor & XLAType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar, bool, Generator *)>("rrelu_with_noise_(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator * generator) -> Tensor")(self, noise, lower, upper, training, generator);
}
-Tensor & XLAType::softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("softplus_out(Tensor output, Tensor self, Scalar beta, Scalar threshold) -> Tensor")(output, self, beta, threshold);
+Tensor & XLAType::softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar, Scalar)>("softplus_out(Tensor out, Tensor self, Scalar beta, Scalar threshold) -> Tensor")(out, self, beta, threshold);
}
Tensor XLAType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar, Scalar)>("softplus(Tensor self, Scalar beta, Scalar threshold) -> Tensor")(self, beta, threshold);
}
-Tensor & XLAType::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward_out(Tensor grad_input, Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor")(grad_input, grad_output, self, beta, threshold, output);
+Tensor & XLAType::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward_out(Tensor grad_input, Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor out) -> Tensor")(grad_input, grad_output, self, beta, threshold, out);
}
-Tensor XLAType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor")(grad_output, self, beta, threshold, output);
+Tensor XLAType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &)>("softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor out) -> Tensor")(grad_output, self, beta, threshold, out);
}
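// softplus_backward (above) follows the same convention: its `out` is the stashed
// forward value. From out = (1/beta) * log(1 + exp(beta * x)) one gets
//   d(out)/dx = sigmoid(beta * x) = 1 - exp(-beta * out),
// so away from the `threshold` cutoff (where the op reverts to identity) the
// gradient can be formed from grad_output and out alone. A sketch of the identity
// only; the actual kernel sits behind the dispatch call.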
-Tensor & XLAType::softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("softshrink_out(Tensor output, Tensor self, Scalar lambd) -> Tensor")(output, self, lambd);
+Tensor & XLAType::softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, Scalar)>("softshrink_out(Tensor out, Tensor self, Scalar lambd) -> Tensor")(out, self, lambd);
}
Tensor XLAType::softshrink(const Tensor & self, Scalar lambd) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, Scalar)>("softshrink(Tensor self, Scalar lambd) -> Tensor")(self, lambd);
@@ -4510,8 +4510,8 @@ Tensor & XLAType::softshrink_backward_out(Tensor & grad_input, const Tensor & gr
Tensor XLAType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, Scalar)>("softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor")(grad_output, self, lambd);
}
-Tensor & XLAType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool2d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & XLAType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool2d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor XLAType::adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("adaptive_avg_pool2d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4522,8 +4522,8 @@ Tensor XLAType::_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_siz
Tensor XLAType::_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor")(grad_output, self);
}
-Tensor & XLAType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool3d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & XLAType::adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("adaptive_avg_pool3d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor XLAType::adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("adaptive_avg_pool3d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4558,8 +4558,8 @@ Tensor & XLAType::adaptive_max_pool3d_backward_out(Tensor & grad_input, const Te
Tensor XLAType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &)>("adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")(grad_output, self, indices);
}
-Tensor & XLAType::avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_out(Tensor output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+Tensor & XLAType::avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_out(Tensor out, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor XLAType::avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d(Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
@@ -4570,8 +4570,8 @@ Tensor & XLAType::avg_pool2d_backward_out(Tensor & grad_input, const Tensor & gr
Tensor XLAType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool2d_backward(Tensor grad_output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
-Tensor & XLAType::avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d_out(Tensor output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+Tensor & XLAType::avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d_out(Tensor out, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor XLAType::avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool)>("avg_pool3d(Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) -> Tensor")(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
@@ -4630,8 +4630,8 @@ Tensor & XLAType::max_pool3d_with_indices_backward_out(Tensor & grad_input, cons
Tensor XLAType::max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &)>("max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor indices) -> Tensor")(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
-Tensor & XLAType::max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_out(Tensor output, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(output, self, indices, output_size);
+Tensor & XLAType::max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_out(Tensor out, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(out, self, indices, output_size);
}
Tensor XLAType::max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d(Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(self, indices, output_size);
@@ -4642,8 +4642,8 @@ Tensor & XLAType::max_unpool2d_backward_out(Tensor & grad_input, const Tensor &
Tensor XLAType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef)>("max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, IntArrayRef output_size) -> Tensor")(grad_output, self, indices, output_size);
}
-Tensor & XLAType::max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_out(Tensor output, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, indices, output_size, stride, padding);
+Tensor & XLAType::max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_out(Tensor out, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, indices, output_size, stride, padding);
}
Tensor XLAType::max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d(Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, indices, output_size, stride, padding);
@@ -4654,8 +4654,8 @@ Tensor & XLAType::max_unpool3d_backward_out(Tensor & grad_input, const Tensor &
Tensor XLAType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) -> Tensor")(grad_output, self, indices, output_size, stride, padding);
}
-Tensor & XLAType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & XLAType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor XLAType::reflection_pad1d(const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("reflection_pad1d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4666,8 +4666,8 @@ Tensor & XLAType::reflection_pad1d_backward_out(Tensor & grad_input, const Tenso
Tensor XLAType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("reflection_pad1d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding);
}
-Tensor & XLAType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & XLAType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor XLAType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("reflection_pad2d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4678,8 +4678,8 @@ Tensor & XLAType::reflection_pad2d_backward_out(Tensor & grad_input, const Tenso
Tensor XLAType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("reflection_pad2d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding);
}
-Tensor & XLAType::replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & XLAType::replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor XLAType::replication_pad1d(const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad1d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4690,8 +4690,8 @@ Tensor & XLAType::replication_pad1d_backward_out(Tensor & grad_input, const Tens
Tensor XLAType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad1d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding);
}
-Tensor & XLAType::replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & XLAType::replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor XLAType::replication_pad2d(const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad2d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4702,8 +4702,8 @@ Tensor & XLAType::replication_pad2d_backward_out(Tensor & grad_input, const Tens
Tensor XLAType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad2d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding);
}
-Tensor & XLAType::replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_out(Tensor output, Tensor self, IntArrayRef padding) -> Tensor")(output, self, padding);
+Tensor & XLAType::replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_out(Tensor out, Tensor self, IntArrayRef padding) -> Tensor")(out, self, padding);
}
Tensor XLAType::replication_pad3d(const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("replication_pad3d(Tensor self, IntArrayRef padding) -> Tensor")(self, padding);
@@ -4714,8 +4714,8 @@ Tensor & XLAType::replication_pad3d_backward_out(Tensor & grad_input, const Tens
Tensor XLAType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef)>("replication_pad3d_backward(Tensor grad_output, Tensor self, IntArrayRef padding) -> Tensor")(grad_output, self, padding);
}
-Tensor & XLAType::upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_linear1d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & XLAType::upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_linear1d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor XLAType::upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_linear1d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4726,8 +4726,8 @@ Tensor & XLAType::upsample_linear1d_backward_out(Tensor & grad_input, const Tens
Tensor XLAType::upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_linear1d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & XLAType::upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & XLAType::upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor XLAType::upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_bilinear2d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4738,8 +4738,8 @@ Tensor & XLAType::upsample_bilinear2d_backward_out(Tensor & grad_input, const Te
Tensor XLAType::upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_bilinear2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & XLAType::upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & XLAType::upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor XLAType::upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_bicubic2d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4750,8 +4750,8 @@ Tensor & XLAType::upsample_bicubic2d_backward_out(Tensor & grad_input, const Ten
Tensor XLAType::upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_bicubic2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & XLAType::upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d_out(Tensor output, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(output, self, output_size, align_corners);
+Tensor & XLAType::upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d_out(Tensor out, Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(out, self, output_size, align_corners);
}
Tensor XLAType::upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, bool)>("upsample_trilinear3d(Tensor self, IntArrayRef output_size, bool align_corners) -> Tensor")(self, output_size, align_corners);
@@ -4762,8 +4762,8 @@ Tensor & XLAType::upsample_trilinear3d_backward_out(Tensor & grad_input, const T
Tensor XLAType::upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef, bool)>("upsample_trilinear3d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) -> Tensor")(grad_output, output_size, input_size, align_corners);
}
-Tensor & XLAType::upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest1d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & XLAType::upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest1d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor XLAType::upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest1d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4774,8 +4774,8 @@ Tensor & XLAType::upsample_nearest1d_backward_out(Tensor & grad_input, const Ten
Tensor XLAType::upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest1d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & XLAType::upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest2d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & XLAType::upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest2d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor XLAType::upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest2d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4786,8 +4786,8 @@ Tensor & XLAType::upsample_nearest2d_backward_out(Tensor & grad_input, const Ten
Tensor XLAType::upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest2d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & XLAType::upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest3d_out(Tensor output, Tensor self, IntArrayRef output_size) -> Tensor")(output, self, output_size);
+Tensor & XLAType::upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, IntArrayRef)>("upsample_nearest3d_out(Tensor out, Tensor self, IntArrayRef output_size) -> Tensor")(out, self, output_size);
}
Tensor XLAType::upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef)>("upsample_nearest3d(Tensor self, IntArrayRef output_size) -> Tensor")(self, output_size);
@@ -4798,20 +4798,20 @@ Tensor & XLAType::upsample_nearest3d_backward_out(Tensor & grad_input, const Ten
Tensor XLAType::upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, IntArrayRef, IntArrayRef)>("upsample_nearest3d_backward(Tensor grad_output, IntArrayRef output_size, IntArrayRef input_size) -> Tensor")(grad_output, output_size, input_size);
}
-Tensor & XLAType::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("sigmoid_backward_out(Tensor grad_input, Tensor grad_output, Tensor output) -> Tensor")(grad_input, grad_output, output);
+Tensor & XLAType::sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("sigmoid_backward_out(Tensor grad_input, Tensor grad_output, Tensor out) -> Tensor")(grad_input, grad_output, out);
}
-Tensor XLAType::sigmoid_backward(const Tensor & grad_output, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor")(grad_output, output);
+Tensor XLAType::sigmoid_backward(const Tensor & grad_output, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("sigmoid_backward(Tensor grad_output, Tensor out) -> Tensor")(grad_output, out);
}
-Tensor & XLAType::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("tanh_backward_out(Tensor grad_input, Tensor grad_output, Tensor output) -> Tensor")(grad_input, grad_output, output);
+Tensor & XLAType::tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &)>("tanh_backward_out(Tensor grad_input, Tensor grad_output, Tensor out) -> Tensor")(grad_input, grad_output, out);
}
-Tensor XLAType::tanh_backward(const Tensor & grad_output, const Tensor & output) const {
- return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("tanh_backward(Tensor grad_output, Tensor output) -> Tensor")(grad_output, output);
+Tensor XLAType::tanh_backward(const Tensor & grad_output, const Tensor & out) const {
+ return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &)>("tanh_backward(Tensor grad_output, Tensor out) -> Tensor")(grad_output, out);
}
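// sigmoid_backward and tanh_backward above are the clearest cases of this pattern:
// both gradients depend only on the forward output. A minimal reference sketch of
// the math (the *_ref helpers are hypothetical, not part of this diff):
//
//   Tensor sigmoid_backward_ref(const Tensor & grad_output, const Tensor & out) {
//     return grad_output * out * (1 - out);   // d/dx sigmoid(x) = y * (1 - y)
//   }
//   Tensor tanh_backward_ref(const Tensor & grad_output, const Tensor & out) {
//     return grad_output * (1 - out * out);   // d/dx tanh(x) = 1 - y^2
//   }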
-Tensor & XLAType::thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+Tensor & XLAType::thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
Tensor XLAType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
@@ -4828,8 +4828,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> XLAType::thnn_conv_transpose2d_backward_o
std::tuple<Tensor,Tensor,Tensor> XLAType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_transpose2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor columns, Tensor ones, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask);
}
-Tensor & XLAType::thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+Tensor & XLAType::thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
Tensor XLAType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_transpose3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
@@ -4846,8 +4846,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> XLAType::thnn_conv_transpose3d_backward_o
std::tuple<Tensor,Tensor,Tensor> XLAType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_transpose3d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask);
}
-Tensor & XLAType::thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding);
+Tensor & XLAType::thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding);
}
Tensor XLAType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, weight, kernel_size, bias, stride, padding);
@@ -4864,14 +4864,14 @@ std::tuple<Tensor &,Tensor &,Tensor &> XLAType::thnn_conv2d_backward_out(Tensor
std::tuple<Tensor,Tensor,Tensor> XLAType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & XLAType::thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & XLAType::thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor XLAType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
}
-Tensor & XLAType::thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & XLAType::thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor XLAType::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation);
@@ -4882,8 +4882,8 @@ std::tuple<Tensor &,Tensor &> XLAType::thnn_conv_depthwise2d_backward_out(Tensor
std::tuple<Tensor,Tensor> XLAType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const {
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array<bool,2>)>("thnn_conv_depthwise2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) -> std::tuple<Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
}
-Tensor & XLAType::thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding);
+Tensor & XLAType::thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding);
}
Tensor XLAType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const {
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef)>("thnn_conv3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding) -> Tensor")(self, weight, kernel_size, bias, stride, padding);
@@ -4900,8 +4900,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> XLAType::thnn_conv3d_backward_out(Tensor
std::tuple<Tensor,Tensor,Tensor> XLAType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv3d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor finput, Tensor fgrad_input, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
-Tensor & XLAType::thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const {
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation);
+Tensor & XLAType::thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
Tensor XLAType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated2d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -4918,8 +4918,8 @@ std::tuple<Tensor &,Tensor &,Tensor &> XLAType::thnn_conv_dilated2d_backward_out | |
std::tuple<Tensor,Tensor,Tensor> XLAType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
return XLATypeDispatch::get_function<std::tuple<Tensor,Tensor,Tensor> (*)(const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>)>("thnn_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor columns, Tensor ones, std::array<bool,3> output_mask) -> std::tuple<Tensor,Tensor,Tensor>")(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask); | |
} | |
-Tensor & XLAType::thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const { | |
- return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d_out(Tensor output, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+Tensor & XLAType::thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const { | |
+ return XLATypeDispatch::get_function<Tensor & (*)(Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d_out(Tensor out, Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(out, self, weight, kernel_size, bias, stride, padding, dilation); | |
} | |
Tensor XLAType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const { | |
return XLATypeDispatch::get_function<Tensor (*)(const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef)>("thnn_conv_dilated3d(Tensor self, Tensor weight, IntArrayRef kernel_size, Tensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) -> Tensor")(self, weight, kernel_size, bias, stride, padding, dilation); | |
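Note on the pattern above: every XLAType method resolves its implementation at runtime through XLATypeDispatch::get_function, keyed by the full schema string, which is why each renamed parameter has to change in both the C++ signature and the schema text on the same line. A minimal sketch of that string-keyed dispatch follows; Registry and the example schema are illustrative stand-ins, not the real XLATypeDispatch.

#include <cassert>
#include <string>
#include <unordered_map>

// Illustrative stand-in: a table from schema strings to type-erased
// function pointers, cast back to a typed pointer on lookup.
struct Registry {
  std::unordered_map<std::string, void*> table;

  template <typename FnPtr>
  FnPtr get_function(const std::string& schema) const {
    auto it = table.find(schema);
    assert(it != table.end() && "schema not registered");
    return reinterpret_cast<FnPtr>(it->second);
  }
};

static int add(int a, int b) { return a + b; }

int main() {
  Registry r;
  r.table["add(int a, int b) -> int"] = reinterpret_cast<void*>(&add);
  // Mirrors: XLATypeDispatch::get_function<Tensor& (*)(...)>("schema")(args...)
  auto fn = r.get_function<int (*)(int, int)>("add(int a, int b) -> int");
  return fn(2, 3) == 5 ? 0 : 1;
}

The cast through a type-erased pointer is why the schema string doubles as documentation of the expected signature: a mismatched template argument would be undefined behavior, so signature and schema must stay in sync, as the paired edits above do.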
diff --git a/build/aten/src/ATen/XLAType.h b/build/aten/src/ATen/XLAType.h | |
index ba314d687..a501db9cc 100644 | |
--- a/build/aten/src/ATen/XLAType.h | |
+++ b/build/aten/src/ATen/XLAType.h | |
@@ -1107,7 +1107,7 @@ struct CAFFE2_API XLAType : public TypeDefault { | |
Tensor & zeros_out(Tensor & out, IntArrayRef size) const override; | |
Tensor zeros_like(const Tensor & self) const override; | |
Tensor zeros_like(const Tensor & self, const TensorOptions & options) const override; | |
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override; | |
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override; | |
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override; | |
Tensor poisson(const Tensor & self, Generator * generator) const override; | |
Tensor native_norm(const Tensor & self, Scalar p) const override; | |
@@ -1442,100 +1442,100 @@ struct CAFFE2_API XLAType : public TypeDefault { | |
Tensor pow(const Tensor & self, const Tensor & exponent) const override; | |
Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent) const override; | |
Tensor pow(Scalar self, const Tensor & exponent) const override; | |
- Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const override; | |
+ Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const override; | |
Tensor normal(const Tensor & mean, double std, Generator * generator) const override; | |
- Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const override; | |
+ Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const override; | |
Tensor normal(double mean, const Tensor & std, Generator * generator) const override; | |
- Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const override; | |
+ Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const override; | |
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) const override; | |
Tensor alias(const Tensor & self) const override; | |
- Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const override; | |
+ Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const override; | |
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const override; | |
- Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override; | |
+ Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override; | |
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override; | |
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override; | |
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override; | |
- Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
+ Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
- Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
+ Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
- Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override; | |
+ Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override; | |
Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override; | |
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override; | |
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override; | |
- Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
+ Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override; | |
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override; | |
- Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
+ Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override; | |
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override; | |
- Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
+ Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override; | |
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override; | |
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override; | |
- Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
+ Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
- Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
+ Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override; | |
- Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override; | |
+ Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override; | |
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override; | |
- Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override; | |
- Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override; | |
+ Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override; | |
+ Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override; | |
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override; | |
- Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) const override; | |
+ Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) const override; | |
Tensor glu(const Tensor & self, int64_t dim) const override; | |
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const override; | |
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const override; | |
- Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const override; | |
+ Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const override; | |
Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const override; | |
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override; | |
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override; | |
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const override; | |
- Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const override; | |
+ Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const override; | |
Tensor leaky_relu(const Tensor & self, Scalar negative_slope) const override; | |
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override; | |
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override; | |
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) const override; | |
- Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) const override; | |
+ Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) const override; | |
Tensor log_sigmoid(const Tensor & self) const override; | |
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const override; | |
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) const override; | |
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override; | |
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override; | |
- Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override; | |
+ Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override; | |
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override; | |
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override; | |
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override; | |
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override; | |
- Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const override; | |
+ Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const override; | |
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) const override; | |
- Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override; | |
- Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override; | |
- Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const override; | |
+ Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override; | |
+ Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override; | |
+ Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const override; | |
Tensor softshrink(const Tensor & self, Scalar lambd) const override; | |
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const override; | |
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const override; | |
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
- Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const override; | |
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const override; | |
@@ -1547,11 +1547,11 @@ struct CAFFE2_API XLAType : public TypeDefault { | |
std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override; | |
Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override; | |
- Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
+ Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
- Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
+ Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override; | |
@@ -1571,103 +1571,103 @@ struct CAFFE2_API XLAType : public TypeDefault { | |
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) const override; | |
Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override; | |
Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) const override; | |
- Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override; | |
+ Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override; | |
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override; | |
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override; | |
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override; | |
- Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override; | |
+ Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override; | |
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override; | |
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override; | |
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override; | |
- Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
+ Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
- Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
+ Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
- Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
+ Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
- Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
+ Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override; | |
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override; | |
- Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
- Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
- Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override; | |
+ Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override; | |
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const override; | |
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override; | |
- Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override; | |
- Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) const override; | |
- Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override; | |
- Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) const override; | |
- Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
+ Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) const override; | |
+ Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
+ Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
- Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const override; | |
- Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
+ Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override; | |
- Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
+ Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override; | |
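All of the `_out` overloads declared above share one convention: the destination tensor is the first parameter and is returned by reference, so the rename from `output` to `out` changes only the name, not the calling pattern. A minimal sketch of that convention, using a plain std::vector stand-in instead of ATen's Tensor:

#include <cstddef>
#include <vector>

using Buf = std::vector<float>;

// relu_out(out, self): writes the result into the caller-provided `out`
// and returns it by reference, the shape of Tensor& f_out(Tensor& out, ...).
Buf& relu_out(Buf& out, const Buf& self) {
  out.resize(self.size());
  for (std::size_t i = 0; i < self.size(); ++i)
    out[i] = self[i] > 0.0f ? self[i] : 0.0f;
  return out;
}

int main() {
  Buf x = {-1.0f, 2.0f};
  Buf out;                 // the caller owns and can reuse the destination
  relu_out(out, x);
  return (out[0] == 0.0f && out[1] == 2.0f) ? 0 : 1;
}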
diff --git a/torch/csrc/autograd/generated/Functions.cpp b/torch/csrc/autograd/generated/Functions.cpp | |
index 89419a623..9d9dc8a7f 100644 | |
--- a/torch/csrc/autograd/generated/Functions.cpp | |
+++ b/torch/csrc/autograd/generated/Functions.cpp | |
@@ -6544,18 +6544,18 @@ variable_list AvgPool3DBackwardBackward::apply(variable_list&& grads) { | |
variable_list EluBackwardBackward::apply(variable_list&& grads) { | |
IndexRangeGenerator gen; | |
auto grad_output_ix = gen.range(1); | |
- auto output_ix = gen.range(1); | |
+ auto out_ix = gen.range(1); | |
variable_list grad_inputs(gen.size()); | |
auto& grad = grads[0]; | |
- auto output = output_.unpack(); | |
+ auto out = out_.unpack(); | |
auto grad_output = grad_output_.unpack(); | |
if (should_compute_output({ grad_output_ix })) { | |
- auto grad_result = elu_backward(grad, alpha, scale, input_scale, output); | |
+ auto grad_result = elu_backward(grad, alpha, scale, input_scale, out); | |
copy_range(grad_inputs, grad_output_ix, grad_result); | |
} | |
- if (should_compute_output({ output_ix })) { | |
- auto grad_result = grad * grad_output * input_scale * (output < 0).toType(grad.type()); | |
- copy_range(grad_inputs, output_ix, grad_result); | |
+ if (should_compute_output({ out_ix })) { | |
+ auto grad_result = grad * grad_output * input_scale * (out < 0).toType(grad.type()); | |
+ copy_range(grad_inputs, out_ix, grad_result); | |
} | |
return grad_inputs; | |
} | |
@@ -6954,10 +6954,10 @@ variable_list SoftplusBackwardBackward::apply(variable_list&& grads) { | |
variable_list grad_inputs(gen.size()); | |
auto& grad = grads[0]; | |
auto self = self_.unpack(); | |
- auto output = output_.unpack(); | |
+ auto out = out_.unpack(); | |
auto grad_output = grad_output_.unpack(); | |
if (should_compute_output({ grad_output_ix })) { | |
- auto grad_result = softplus_backward(grad, self, beta, threshold, output); | |
+ auto grad_result = softplus_backward(grad, self, beta, threshold, out); | |
copy_range(grad_inputs, grad_output_ix, grad_result); | |
} | |
if (should_compute_output({ self_ix })) { | |
@@ -7118,36 +7118,36 @@ variable_list UpsampleNearest3DBackwardBackward::apply(variable_list&& grads) { | |
variable_list SigmoidBackwardBackward::apply(variable_list&& grads) { | |
IndexRangeGenerator gen; | |
auto grad_output_ix = gen.range(1); | |
- auto output_ix = gen.range(1); | |
+ auto out_ix = gen.range(1); | |
variable_list grad_inputs(gen.size()); | |
auto& grad = grads[0]; | |
- auto output = output_.unpack(); | |
+ auto out = out_.unpack(); | |
auto grad_output = grad_output_.unpack(); | |
if (should_compute_output({ grad_output_ix })) { | |
- auto grad_result = sigmoid_backward(grad, output); | |
+ auto grad_result = sigmoid_backward(grad, out); | |
copy_range(grad_inputs, grad_output_ix, grad_result); | |
} | |
- if (should_compute_output({ output_ix })) { | |
- auto grad_result = grad * grad_output * (-2 * output + 1); | |
- copy_range(grad_inputs, output_ix, grad_result); | |
+ if (should_compute_output({ out_ix })) { | |
+ auto grad_result = grad * grad_output * (-2 * out + 1); | |
+ copy_range(grad_inputs, out_ix, grad_result); | |
} | |
return grad_inputs; | |
} | |
variable_list TanhBackwardBackward::apply(variable_list&& grads) { | |
IndexRangeGenerator gen; | |
auto grad_output_ix = gen.range(1); | |
- auto output_ix = gen.range(1); | |
+ auto out_ix = gen.range(1); | |
variable_list grad_inputs(gen.size()); | |
auto& grad = grads[0]; | |
- auto output = output_.unpack(); | |
+ auto out = out_.unpack(); | |
auto grad_output = grad_output_.unpack(); | |
if (should_compute_output({ grad_output_ix })) { | |
- auto grad_result = tanh_backward(grad, output); | |
+ auto grad_result = tanh_backward(grad, out); | |
copy_range(grad_inputs, grad_output_ix, grad_result); | |
} | |
- if (should_compute_output({ output_ix })) { | |
- auto grad_result = -2 * output * grad * grad_output; | |
- copy_range(grad_inputs, output_ix, grad_result); | |
+ if (should_compute_output({ out_ix })) { | |
+ auto grad_result = -2 * out * grad * grad_output; | |
+ copy_range(grad_inputs, out_ix, grad_result); | |
} | |
return grad_inputs; | |
} | |
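The renamed formulas in SigmoidBackwardBackward and TanhBackwardBackward are easy to sanity-check by hand: sigmoid_backward(g, out) = g * out * (1 - out), so its derivative in out is g * (1 - 2*out), matching grad * grad_output * (-2 * out + 1); tanh_backward(g, out) = g * (1 - out*out), so its derivative in out is -2 * out * g, matching -2 * out * grad * grad_output. A standalone finite-difference check of both (plain C++, no ATen):

#include <cmath>
#include <cstdio>

static double sigmoid_backward(double g, double y) { return g * y * (1.0 - y); }
static double tanh_backward(double g, double y)    { return g * (1.0 - y * y); }

int main() {
  const double g = 0.7, y = 0.3, grad = 1.0, eps = 1e-6;

  // Analytic forms, as in SigmoidBackwardBackward / TanhBackwardBackward:
  double sig_analytic  = grad * g * (-2.0 * y + 1.0);  // grad * grad_output * (-2*out + 1)
  double tanh_analytic = -2.0 * y * grad * g;          // -2 * out * grad * grad_output

  // Central differences of the first backward with respect to `out`:
  double sig_numeric  = (sigmoid_backward(g, y + eps) - sigmoid_backward(g, y - eps)) / (2.0 * eps);
  double tanh_numeric = (tanh_backward(g, y + eps)    - tanh_backward(g, y - eps))    / (2.0 * eps);

  std::printf("sigmoid: analytic=%.9f numeric=%.9f\n", sig_analytic, sig_numeric);
  std::printf("tanh:    analytic=%.9f numeric=%.9f\n", tanh_analytic, tanh_numeric);
  return 0;
}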
diff --git a/torch/csrc/autograd/generated/Functions.h b/torch/csrc/autograd/generated/Functions.h
index 489dd3793..519be28d7 100644
--- a/torch/csrc/autograd/generated/Functions.h
+++ b/torch/csrc/autograd/generated/Functions.h
@@ -4631,8 +4631,8 @@ struct EluBackwardBackward : public TraceableFunction {
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EluBackwardBackward"; }
void release_variables() override {
- output_.reset_data();
- output_.reset_grad_function();
+ out_.reset_data();
+ out_.reset_grad_function();
grad_output_.reset_data();
grad_output_.reset_grad_function();
}
@@ -4640,7 +4640,7 @@ struct EluBackwardBackward : public TraceableFunction {
Scalar alpha;
Scalar scale;
Scalar input_scale;
- SavedVariable output_;
+ SavedVariable out_;
SavedVariable grad_output_;
};
@@ -4979,8 +4979,8 @@ struct SoftplusBackwardBackward : public TraceableFunction {
void release_variables() override {
self_.reset_data();
self_.reset_grad_function();
- output_.reset_data();
- output_.reset_grad_function();
+ out_.reset_data();
+ out_.reset_grad_function();
grad_output_.reset_data();
grad_output_.reset_grad_function();
}
@@ -4988,7 +4988,7 @@ struct SoftplusBackwardBackward : public TraceableFunction {
SavedVariable self_;
Scalar beta;
Scalar threshold;
- SavedVariable output_;
+ SavedVariable out_;
SavedVariable grad_output_;
};
@@ -5142,13 +5142,13 @@ struct SigmoidBackwardBackward : public TraceableFunction {
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SigmoidBackwardBackward"; }
void release_variables() override {
- output_.reset_data();
- output_.reset_grad_function();
+ out_.reset_data();
+ out_.reset_grad_function();
grad_output_.reset_data();
grad_output_.reset_grad_function();
}
- SavedVariable output_;
+ SavedVariable out_;
SavedVariable grad_output_;
};
@@ -5157,13 +5157,13 @@ struct TanhBackwardBackward : public TraceableFunction {
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TanhBackwardBackward"; }
void release_variables() override {
- output_.reset_data();
- output_.reset_grad_function();
+ out_.reset_data();
+ out_.reset_grad_function();
grad_output_.reset_data();
grad_output_.reset_grad_function();
}
- SavedVariable output_;
+ SavedVariable out_;
SavedVariable grad_output_;
};
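// Each *BackwardBackward struct above holds its saved tensors as SavedVariable
// members named after the (renamed) schema argument, and release_variables()
// drops them once the node has fired so graph memory can be reclaimed after
// backward. A toy, ATen-free sketch of that ownership pattern (illustrative
// names only):
//
//   #include <memory>
//   #include <vector>
//
//   struct ToySavedVariable {
//     std::shared_ptr<std::vector<float>> data_;  // stands in for the saved tensor
//     void reset_data() { data_.reset(); }        // drop the payload eagerly
//   };
//
//   struct ToyBackwardBackward {
//     ToySavedVariable out_;                      // saved forward output
//     ToySavedVariable grad_output_;              // saved incoming gradient
//     void release_variables() {
//       out_.reset_data();
//       grad_output_.reset_data();
//     }
//   };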
diff --git a/torch/csrc/autograd/generated/VariableType.h b/torch/csrc/autograd/generated/VariableType.h
index d2d449204..c4b5f1451 100644
--- a/torch/csrc/autograd/generated/VariableType.h
+++ b/torch/csrc/autograd/generated/VariableType.h
@@ -120,7 +120,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
int64_t _dimV(const Tensor & self) const override;
Tensor _dim_arange(const Tensor & like, int64_t dim) const override;
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
- Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
+ Tensor & _dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const override;
std::tuple<Tensor,Tensor,Tensor,Tensor> _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode) const override;
@@ -170,7 +170,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype) const override;
Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim) const override;
Tensor _standard_gamma(const Tensor & self, Generator * generator) const override;
- Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) const override;
+ Tensor _standard_gamma_grad(const Tensor & self, const Tensor & out) const override;
Tensor _th_abs(const Tensor & self) const override;
Tensor & _th_abs_out(Tensor & result, const Tensor & self) const override;
Tensor _th_acos(const Tensor & self) const override;
@@ -707,11 +707,11 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & acos_out(Tensor & out, const Tensor & self) const override;
Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) const override;
- Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const override;
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const override;
- Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) const override;
std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const override;
@@ -781,11 +781,11 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
- Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
- Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
+ Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const override;
Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const override;
Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const override;
Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const override;
@@ -802,7 +802,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
- Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
+ Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const override;
Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) const override;
Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) const override;
Tensor bincount(const Tensor & self, const Tensor & weights, int64_t minlength) const override;
@@ -922,9 +922,9 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor einsum(std::string equation, TensorList tensors) const override;
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
- Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
- Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const override;
- Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
+ Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
+ Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const override;
+ Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const override;
Tensor embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const override;
Tensor embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const override;
std::tuple<Tensor,Tensor,Tensor,Tensor> embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const override;
@@ -1025,7 +1025,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor glu(const Tensor & self, int64_t dim) const override;
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const override;
- Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) const override;
+ Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) const override;
Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
std::tuple<Tensor,Tensor> grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const override;
@@ -1053,7 +1053,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
- Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const override;
+ Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const override;
Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction) const override;
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) const override;
@@ -1097,7 +1097,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) const override;
Tensor le(const Tensor & self, Scalar other) const override;
Tensor le(const Tensor & self, const Tensor & other) const override;
@@ -1109,7 +1109,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) const override;
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const override;
- Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const override;
+ Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const override;
Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight) const override;
Tensor & lerp_(Tensor & self, const Tensor & end, Scalar weight) const override;
Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, Scalar weight) const override;
@@ -1137,7 +1137,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const override;
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) const override;
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const override;
- Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) const override;
+ Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) const override;
Tensor log_softmax(const Tensor & self, int64_t dim, ScalarType dtype) const override;
Tensor log_softmax(const Tensor & self, int64_t dim) const override;
Tensor logdet(const Tensor & self) const override;
@@ -1189,11 +1189,11 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
- Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
+ Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const override;
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
- Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const override;
Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim) const override;
Tensor mean(const Tensor & self, ScalarType dtype) const override;
Tensor mean(const Tensor & self) const override;
@@ -1235,7 +1235,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor mul(const Tensor & self, const Tensor & other) const override;
Tensor mul(const Tensor & self, Scalar other) const override;
Tensor & mul_(Tensor & self, const Tensor & other) const override;
@@ -1244,13 +1244,13 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
- Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
+ Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const override;
Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) const override;
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) const override;
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const override;
Tensor & multinomial_out(Tensor & out, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const override;
Tensor mv(const Tensor & self, const Tensor & vec) const override;
@@ -1282,12 +1282,12 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
- Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const override;
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
- Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
+ Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const override;
Tensor nonzero(const Tensor & self) const override;
Tensor & nonzero_out(Tensor & out, const Tensor & self) const override;
Tensor norm(const Tensor & self, c10::optional<Scalar> p, ScalarType dtype) const override;
@@ -1301,9 +1301,9 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor normal(double mean, const Tensor & std, Generator * generator) const override;
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor & normal_(Tensor & self, double mean, double std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const override;
- Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const override;
+ Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const override;
Tensor nuclear_norm(const Tensor & self, bool keepdim) const override;
Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, bool keepdim) const override;
int64_t numel(const Tensor & self) const override;
@@ -1397,11 +1397,11 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor relu(const Tensor & self) const override;
Tensor & relu_(Tensor & self) const override;
Tensor remainder(const Tensor & self, Scalar other) const override;
@@ -1417,15 +1417,15 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) const override;
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) const override;
- Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const override;
+ Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const override;
Tensor reshape(const Tensor & self, IntArrayRef shape) const override;
Tensor reshape_as(const Tensor & self, const Tensor & other) const override;
Tensor & resize_(Tensor & self, IntArrayRef size) const override;
@@ -1448,7 +1448,7 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const override;
- Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
+ Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const override;
Tensor rsqrt(const Tensor & self) const override;
Tensor & rsqrt_(Tensor & self) const override;
Tensor & rsqrt_out(Tensor & out, const Tensor & self) const override;
@@ -1475,8 +1475,8 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & set_(Tensor & self) const override;
Tensor sigmoid(const Tensor & self) const override;
Tensor & sigmoid_(Tensor & self) const override;
- Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
+ Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & out) const override;
+ Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override;
Tensor & sigmoid_out(Tensor & out, const Tensor & self) const override;
Tensor sign(const Tensor & self) const override;
Tensor & sign_(Tensor & self) const override;
@@ -1495,21 +1495,21 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
- Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const override;
+ Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const override;
Tensor softmax(const Tensor & self, int64_t dim, ScalarType dtype) const override;
Tensor softmax(const Tensor & self, int64_t dim) const override;
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) const override;
- Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const override;
- Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const override;
+ Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & out) const override;
+ Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) const override;
Tensor softshrink(const Tensor & self, Scalar lambd) const override;
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const override;
- Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const override;
+ Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) const override;
std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending) const override;
std::tuple<Tensor &,Tensor &> sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) const override;
Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & options) const override;
@@ -1565,8 +1565,8 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor & tan_out(Tensor & out, const Tensor & self) const override;
Tensor tanh(const Tensor & self) const override;
Tensor & tanh_(Tensor & self) const override;
- Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) const override;
- Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const override;
+ Tensor tanh_backward(const Tensor & grad_output, const Tensor & out) const override;
+ Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & out) const override;
Tensor & tanh_out(Tensor & out, const Tensor & self) const override;
Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other) const override;
Tensor thnn_col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) const override;
@@ -1576,43 +1576,43 @@ struct TORCH_API VariableType final : public at::TypeDefault {
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
- Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
- Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
+ Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) const override;
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) const override;
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_dilated2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_dilated3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) const override;
Tensor thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
Tensor thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) const override;
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
- Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
+ Tensor & thnn_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) const override;
Tensor thnn_im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) const override;
Tensor thnn_im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) const override;
Tensor threshold(const Tensor & self, Scalar threshold, Scalar value) const override;
@@ -1656,31 +1656,31 @@ struct TORCH_API VariableType final : public at::TypeDefault {
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bicubic2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) const override;
- Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const override;
+ Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const override;
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) const override;
- Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
+ Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) const override;
Tensor values(const Tensor & self) const override;
Tensor var(const Tensor & self, bool unbiased) const override;
Tensor var(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) const override;
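// The header changes above are a uniform rename of the single-result argument
// of each _out variant from `output` to `out`; overload sets and all other
// parameters are unchanged, and multi-output _forward_out overloads (e.g.
// log_sigmoid_forward_out, nll_loss_forward_out) keep `output`. The tracer
// below records the argument under the new name (addInputs(node, "out", out)).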
diff --git a/torch/csrc/autograd/generated/VariableTypeEverything.cpp b/torch/csrc/autograd/generated/VariableTypeEverything.cpp
index 592c194ff..100268dfb 100644
--- a/torch/csrc/autograd/generated/VariableTypeEverything.cpp
+++ b/torch/csrc/autograd/generated/VariableTypeEverything.cpp
@@ -2011,7 +2011,7 @@ Tensor VariableType::_dirichlet_grad(const Tensor & x, const Tensor & alpha, con
}
return result;
}
-Tensor & VariableType::_dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) const {
+Tensor & VariableType::_dirichlet_grad_out(Tensor & out, const Tensor & x, const Tensor & alpha, const Tensor & total) const {
profiler::RecordFunction profiler("_dirichlet_grad_out", Function::peek_at_next_sequence_nr());
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
@@ -2027,18 +2027,18 @@ Tensor & VariableType::_dirichlet_grad_out(Tensor & output, const Tensor & x, co
if (tracer_state->force_outplace) {
} else {
- jit::tracer::addInputs(node, "output", output);
+ jit::tracer::addInputs(node, "out", out);
}
tracer_state->graph->insertNode(node);
- jit::tracer::ensureUniqueIfOutOfPlaced("_dirichlet_grad_out", output);
+ jit::tracer::ensureUniqueIfOutOfPlaced("_dirichlet_grad_out", out);
jit::tracer::setTracingState(nullptr);
}
- TypeDefault::_dirichlet_grad_out(output, x, alpha, total);
+ TypeDefault::_dirichlet_grad_out(out, x, alpha, total);
if (tracer_state) {
jit::tracer::setTracingState(std::move(tracer_state));
- jit::tracer::addOutput(node, output);
+ jit::tracer::addOutput(node, out);
}
- return output;
+ return out;
}
std::tuple<Tensor,Tensor,Tensor,Tensor> VariableType::_embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse) const {
profiler::RecordFunction profiler("_embedding_bag", Function::peek_at_next_sequence_nr());
@@ -4239,10 +4239,11 @@ Tensor VariableType::_standard_gamma(const Tensor & self, Generator * generator)
}
return result;
}
-Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
+Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & out) const {
profiler::RecordFunction profiler("_standard_gamma_grad", Function::peek_at_next_sequence_nr());
auto& self_ = unpack(self, "self", 0);
- auto& output_ = unpack(output, "output", 1);
+ auto& out_ = unpack(out, "out", 1);
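+ // the gamma sample out enters only as data; the next check errors if it requires grad, since no derivative is defined for it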
+ check_no_requires_grad(out, "out");
std::shared_ptr<StandardGammaGradBackward> grad_fn;
if (compute_requires_grad( self )) {
grad_fn = std::shared_ptr<StandardGammaGradBackward>(new StandardGammaGradBackward(), deleteFunction);
@@ -4257,7 +4258,7 @@ Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & ou
node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
jit::tracer::recordSourceLocation(node);
jit::tracer::addInputs(node, "self", self);
- jit::tracer::addInputs(node, "output", output);
+ jit::tracer::addInputs(node, "out", out);
tracer_state->graph->insertNode(node);
jit::tracer::setTracingState(nullptr);
@@ -4267,23 +4268,23 @@ Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & ou
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
c10::intrusive_ptr<TensorImpl> self__impl_saved;
if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
- c10::optional<Storage> output__storage_saved =
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt;
- c10::intrusive_ptr<TensorImpl> output__impl_saved;
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr();
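+ // debug-only snapshots of out_'s storage/impl; the AT_ASSERTs further down verify the base kernel left them unchanged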
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
#endif | |
auto tmp = ([&]() { | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- return baseType->_standard_gamma_grad(self_, output_); | |
+ return baseType->_standard_gamma_grad(self_, out_); | |
})(); | |
auto result = as_variable(tmp); | |
#ifndef NDEBUG | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
#endif | |
set_history(flatten_tensor_args( result ), grad_fn); | |
if (tracer_state) { | |
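// Reviewer note: _standard_gamma_grad is not a pure rename. Alongside the
// "output" -> "out" swap, the hunk adds check_no_requires_grad(out, "out"),
// which errors out if a differentiable tensor is passed for an argument whose
// gradient is never computed (StandardGammaGradBackward only collects edges
// for self). A minimal sketch of the intended contract, using the helpers
// visible above:
//   auto& out_ = unpack(out, "out", 1);   // positional unpack, index 1
//   check_no_requires_grad(out, "out");   // out must not require grad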
@@ -36005,15 +36006,15 @@ Tensor VariableType::adaptive_avg_pool2d(const Tensor & self, IntArrayRef output | |
} | |
return result; | |
} | |
-Tensor & VariableType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & VariableType::adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("adaptive_avg_pool2d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("adaptive_avg_pool2d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -36029,17 +36030,17 @@ Tensor & VariableType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & s | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("adaptive_avg_pool2d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("adaptive_avg_pool2d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -36047,23 +36048,23 @@ Tensor & VariableType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & s | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->adaptive_avg_pool2d_out(output_, self_, output_size); | |
+ baseType->adaptive_avg_pool2d_out(out_, self_, output_size); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
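// Reviewer note: out= variants never build a typed backward node. grad_fn is
// declared as a plain std::shared_ptr<Function>, and gradients are simply
// forbidden: a differentiable input, or a differentiable out tensor, hits
// throw_error_out_requires_grad before dispatch. The rename only changes
// which variable the second guard inspects:
//   if (compute_requires_grad( out )) {
//     throw_error_out_requires_grad("adaptive_avg_pool2d");
//   }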
Tensor VariableType::adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d", Function::peek_at_next_sequence_nr()); | |
@@ -36234,15 +36235,15 @@ Tensor & VariableType::adaptive_avg_pool3d_backward_out(Tensor & grad_input, con | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef output_size) const { | |
+Tensor & VariableType::adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("adaptive_avg_pool3d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("adaptive_avg_pool3d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -36258,17 +36259,17 @@ Tensor & VariableType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & s | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("adaptive_avg_pool3d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("adaptive_avg_pool3d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -36276,23 +36277,23 @@ Tensor & VariableType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & s | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->adaptive_avg_pool3d_out(output_, self_, output_size); | |
+ baseType->adaptive_avg_pool3d_out(out_, self_, output_size); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
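// Reviewer note: the #ifndef NDEBUG blocks implement a debug-only aliasing
// check. Before dispatch, each argument's Storage and TensorImpl pointers are
// stashed; after dispatch, AT_ASSERT verifies the kernel did not replace
// either behind the caller's back. Only the local names change with the
// rename (output__storage_saved becomes out__storage_saved, and so on); the
// assertions themselves are identical.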
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool1d", Function::peek_at_next_sequence_nr()); | |
@@ -40031,15 +40032,15 @@ Tensor & VariableType::avg_pool2d_backward_out(Tensor & grad_input, const Tensor | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::avg_pool2d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
+Tensor & VariableType::avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("avg_pool2d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("avg_pool2d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -40059,17 +40060,17 @@ Tensor & VariableType::avg_pool2d_out(Tensor & output, const Tensor & self, IntA | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("avg_pool2d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("avg_pool2d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -40077,23 +40078,23 @@ Tensor & VariableType::avg_pool2d_out(Tensor & output, const Tensor & self, IntA | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->avg_pool2d_out(output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ baseType->avg_pool2d_out(out_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
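// Reviewer note: the small braced block around the baseType call is the
// dispatch boundary. AutoNonVariableTypeMode is an RAII guard that, for its
// scope, routes calls past the variable/autograd layer, so the raw kernel
// runs on the unpacked tensors:
//   {
//     at::AutoNonVariableTypeMode non_var_type_mode(true);
//     baseType->avg_pool2d_out(out_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad);
//   }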
Tensor VariableType::avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d", Function::peek_at_next_sequence_nr()); | |
@@ -40287,15 +40288,15 @@ Tensor & VariableType::avg_pool3d_backward_out(Tensor & grad_input, const Tensor | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::avg_pool3d_out(Tensor & output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
+Tensor & VariableType::avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("avg_pool3d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("avg_pool3d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -40315,17 +40316,17 @@ Tensor & VariableType::avg_pool3d_out(Tensor & output, const Tensor & self, IntA | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("avg_pool3d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("avg_pool3d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -40333,23 +40334,23 @@ Tensor & VariableType::avg_pool3d_out(Tensor & output, const Tensor & self, IntA | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->avg_pool3d_out(output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ baseType->avg_pool3d_out(out_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm", Function::peek_at_next_sequence_nr()); | |
@@ -41113,9 +41114,9 @@ Tensor & VariableType::binary_cross_entropy_backward_out(Tensor & grad_input, co | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
+Tensor & VariableType::binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
@@ -41123,7 +41124,7 @@ Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & | |
if (compute_requires_grad( self, target, weight )) { | |
throw_error_out_requires_grad("binary_cross_entropy"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("binary_cross_entropy"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -41141,17 +41142,17 @@ Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("binary_cross_entropy_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("binary_cross_entropy_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -41167,12 +41168,12 @@ Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->binary_cross_entropy_out(output_, self_, target_, weight_, reduction); | |
+ baseType->binary_cross_entropy_out(out_, self_, target_, weight_, reduction); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -41183,13 +41184,13 @@ Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & | |
AT_ASSERT(weight__storage_saved.value().is_alias_of(weight_.storage())); | |
if (weight__impl_saved) AT_ASSERT(weight__impl_saved == weight_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
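// Reviewer note: binary_cross_entropy_out shows the optional-tensor path.
// weight is unpacked with unpack_opt (it may be undefined), and the NDEBUG
// aliasing checks are guarded to match: a storage is saved only when
// has_storage() is true and an impl only when the tensor is defined, so an
// absent weight skips the assertions entirely. The rename does not interact
// with any of this; it only touches the out argument.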
Tensor VariableType::binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_with_logits", Function::peek_at_next_sequence_nr()); | |
@@ -46770,18 +46771,18 @@ Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar in | |
} | |
return self; | |
} | |
-Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const { | |
+Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const { | |
profiler::RecordFunction profiler("elu_backward", Function::peek_at_next_sequence_nr()); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
- auto& output_ = unpack(output, "output", 4); | |
+ auto& out_ = unpack(out, "out", 4); | |
std::shared_ptr<EluBackwardBackward> grad_fn; | |
- if (compute_requires_grad( grad_output, output )) { | |
+ if (compute_requires_grad( grad_output, out )) { | |
grad_fn = std::shared_ptr<EluBackwardBackward>(new EluBackwardBackward(), deleteFunction); | |
- grad_fn->set_next_edges(collect_next_edges( grad_output, output )); | |
+ grad_fn->set_next_edges(collect_next_edges( grad_output, out )); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
grad_fn->input_scale = input_scale; | |
- grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->out_ = SavedVariable(out, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -46796,7 +46797,7 @@ Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scal | |
jit::tracer::addInputs(node, "alpha", alpha); | |
jit::tracer::addInputs(node, "scale", scale); | |
jit::tracer::addInputs(node, "input_scale", input_scale); | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
tracer_state->graph->insertNode(node); | |
jit::tracer::setTracingState(nullptr); | |
@@ -46806,23 +46807,23 @@ Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scal | |
grad_output_.has_storage() ? c10::optional<Storage>(grad_output_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> grad_output__impl_saved; | |
if (grad_output_.defined()) grad_output__impl_saved = grad_output_.getIntrusivePtr(); | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
#endif | |
auto tmp = ([&]() { | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- return baseType->elu_backward(grad_output_, alpha, scale, input_scale, output_); | |
+ return baseType->elu_backward(grad_output_, alpha, scale, input_scale, out_); | |
})(); | |
auto result = as_variable(tmp); | |
#ifndef NDEBUG | |
if (grad_output__storage_saved.has_value()) | |
AT_ASSERT(grad_output__storage_saved.value().is_alias_of(grad_output_.storage())); | |
if (grad_output__impl_saved) AT_ASSERT(grad_output__impl_saved == grad_output_.getIntrusivePtr()); | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
#endif | |
set_history(flatten_tensor_args( result ), grad_fn); | |
if (tracer_state) { | |
@@ -46831,13 +46832,13 @@ Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scal | |
} | |
return result; | |
} | |
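// Reviewer note: elu_backward is a case where "output" was a genuine input
// (the saved forward result), not an out= parameter, yet it is renamed too,
// including the saved variable (grad_fn->out_), so the EluBackwardBackward
// member must have been renamed in lockstep elsewhere in the patch. Saving
// the forward result is sufficient because, assuming the standard definition
// elu(x) = scale*x for x > 0 and scale*alpha*(exp(input_scale*x) - 1)
// otherwise, the derivative is recoverable from the output alone:
//   d elu / dx = (out > 0) ? scale : input_scale * (out + scale * alpha);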
-Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) const { | |
+Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & out) const { | |
profiler::RecordFunction profiler("elu_backward_out", Function::peek_at_next_sequence_nr()); | |
auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
- auto& output_ = unpack(output, "output", 5); | |
+ auto& out_ = unpack(out, "out", 5); | |
std::shared_ptr<Function> grad_fn; | |
- if (compute_requires_grad( grad_output, output )) { | |
+ if (compute_requires_grad( grad_output, out )) { | |
throw_error_out_requires_grad("elu_backward"); | |
} | |
if (compute_requires_grad( grad_input )) { | |
@@ -46855,7 +46856,7 @@ Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad | |
jit::tracer::addInputs(node, "alpha", alpha); | |
jit::tracer::addInputs(node, "scale", scale); | |
jit::tracer::addInputs(node, "input_scale", input_scale); | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
if (tracer_state->force_outplace) { | |
} else { | |
@@ -46874,14 +46875,14 @@ Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad | |
grad_output_.has_storage() ? c10::optional<Storage>(grad_output_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> grad_output__impl_saved; | |
if (grad_output_.defined()) grad_output__impl_saved = grad_output_.getIntrusivePtr(); | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->elu_backward_out(grad_input_, grad_output_, alpha, scale, input_scale, output_); | |
+ baseType->elu_backward_out(grad_input_, grad_output_, alpha, scale, input_scale, out_); | |
} | |
#ifndef NDEBUG | |
if (grad_input__storage_saved.has_value()) | |
@@ -46890,9 +46891,9 @@ Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad | |
if (grad_output__storage_saved.has_value()) | |
AT_ASSERT(grad_output__storage_saved.value().is_alias_of(grad_output_.storage())); | |
if (grad_output__impl_saved) AT_ASSERT(grad_output__impl_saved == grad_output_.getIntrusivePtr()); | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
#endif | |
increment_version(grad_input); | |
rebase_history(flatten_tensor_args( grad_input ), grad_fn); | |
@@ -46902,15 +46903,15 @@ Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
+Tensor & VariableType::elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) const { | |
profiler::RecordFunction profiler("elu_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("elu"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("elu"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -46928,17 +46929,17 @@ Tensor & VariableType::elu_out(Tensor & output, const Tensor & self, Scalar alph | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("elu_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("elu_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -46946,23 +46947,23 @@ Tensor & VariableType::elu_out(Tensor & output, const Tensor & self, Scalar alph | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->elu_out(output_, self_, alpha, scale, input_scale); | |
+ baseType->elu_out(out_, self_, alpha, scale, input_scale); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
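// Reviewer note: every out= wrapper ends with the same epilogue.
// increment_version bumps the destination tensor's version counter so
// autograd can later detect that saved copies of it went stale, and
// rebase_history re-points the tensor's autograd history at grad_fn. The
// rename simply redirects both calls at the new name:
//   increment_version(out);
//   rebase_history(flatten_tensor_args( out ), grad_fn);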
Tensor VariableType::embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const { | |
profiler::RecordFunction profiler("embedding", Function::peek_at_next_sequence_nr()); | |
@@ -51480,15 +51481,15 @@ Tensor & VariableType::glu_backward_out(Tensor & grad_input, const Tensor & grad | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::glu_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+Tensor & VariableType::glu_out(Tensor & out, const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("glu"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("glu"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -51504,17 +51505,17 @@ Tensor & VariableType::glu_out(Tensor & output, const Tensor & self, int64_t dim | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("glu_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("glu_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -51522,23 +51523,23 @@ Tensor & VariableType::glu_out(Tensor & output, const Tensor & self, int64_t dim | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->glu_out(output_, self_, dim); | |
+ baseType->glu_out(out_, self_, dim); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode) const { | |
profiler::RecordFunction profiler("grid_sampler", Function::peek_at_next_sequence_nr()); | |
@@ -52609,15 +52610,15 @@ Tensor & VariableType::hardtanh_backward_out(Tensor & grad_input, const Tensor & | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
+Tensor & VariableType::hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("hardtanh"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("hardtanh"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -52634,17 +52635,17 @@ Tensor & VariableType::hardtanh_out(Tensor & output, const Tensor & self, Scalar | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("hardtanh_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("hardtanh_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -52652,23 +52653,23 @@ Tensor & VariableType::hardtanh_out(Tensor & output, const Tensor & self, Scalar | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->hardtanh_out(output_, self_, min_val, max_val); | |
+ baseType->hardtanh_out(out_, self_, min_val, max_val); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction) const { | |
profiler::RecordFunction profiler("hinge_embedding_loss", Function::peek_at_next_sequence_nr()); | |
@@ -54408,16 +54409,16 @@ Tensor & VariableType::l1_loss_backward_out(Tensor & grad_input, const Tensor & | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & VariableType::l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
profiler::RecordFunction profiler("l1_loss_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self, target )) { | |
throw_error_out_requires_grad("l1_loss"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("l1_loss"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -54434,17 +54435,17 @@ Tensor & VariableType::l1_loss_out(Tensor & output, const Tensor & self, const T | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("l1_loss_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("l1_loss_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -54456,12 +54457,12 @@ Tensor & VariableType::l1_loss_out(Tensor & output, const Tensor & self, const T | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->l1_loss_out(output_, self_, target_, reduction); | |
+ baseType->l1_loss_out(out_, self_, target_, reduction); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -54469,13 +54470,13 @@ Tensor & VariableType::l1_loss_out(Tensor & output, const Tensor & self, const T | |
AT_ASSERT(target__storage_saved.value().is_alias_of(target_.storage())); | |
if (target__impl_saved) AT_ASSERT(target__impl_saved == target_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) const { | |
profiler::RecordFunction profiler("layer_norm", Function::peek_at_next_sequence_nr()); | |
@@ -54943,15 +54944,15 @@ Tensor & VariableType::leaky_relu_backward_out(Tensor & grad_input, const Tensor | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const { | |
+Tensor & VariableType::leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("leaky_relu"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("leaky_relu"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -54967,17 +54968,17 @@ Tensor & VariableType::leaky_relu_out(Tensor & output, const Tensor & self, Scal | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("leaky_relu_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("leaky_relu_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -54985,23 +54986,23 @@ Tensor & VariableType::leaky_relu_out(Tensor & output, const Tensor & self, Scal | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->leaky_relu_out(output_, self_, negative_slope); | |
+ baseType->leaky_relu_out(out_, self_, negative_slope); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::lerp(const Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp", Function::peek_at_next_sequence_nr()); | |
@@ -56368,7 +56369,7 @@ std::tuple<Tensor &,Tensor &> VariableType::log_sigmoid_forward_out(Tensor & out | |
} | |
return std::forward_as_tuple(output, buffer); | |
} | |
-Tensor & VariableType::log_sigmoid_out(Tensor & output, const Tensor & self) const { | |
+Tensor & VariableType::log_sigmoid_out(Tensor & out, const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid_out", Function::peek_at_next_sequence_nr()); | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
@@ -56382,18 +56383,18 @@ Tensor & VariableType::log_sigmoid_out(Tensor & output, const Tensor & self) con | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("log_sigmoid_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("log_sigmoid_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
- TypeDefault::log_sigmoid_out(output, self); | |
+ TypeDefault::log_sigmoid_out(out, self); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
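// Reviewer note: log_sigmoid_out takes a different shape because it is a
// composite: it forwards to TypeDefault::log_sigmoid_out(out, self) rather
// than baseType, so there is no unpacking, no NDEBUG aliasing block, and no
// version bump at this level; those live in the functions it calls. Note the
// log_sigmoid_forward_out context line above still returns
// std::forward_as_tuple(output, buffer): the forward variant's first result
// keeps its original "output" name in this diff.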
Tensor VariableType::log_softmax(const Tensor & self, int64_t dim, ScalarType dtype) const { | |
profiler::RecordFunction profiler("log_softmax", Function::peek_at_next_sequence_nr()); | |
@@ -58718,16 +58719,16 @@ Tensor & VariableType::max_unpool2d_backward_out(Tensor & grad_input, const Tens | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const { | |
+Tensor & VariableType::max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack(indices, "indices", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self, indices )) { | |
throw_error_out_requires_grad("max_unpool2d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("max_unpool2d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -58744,17 +58745,17 @@ Tensor & VariableType::max_unpool2d_out(Tensor & output, const Tensor & self, co | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("max_unpool2d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("max_unpool2d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -58766,12 +58767,12 @@ Tensor & VariableType::max_unpool2d_out(Tensor & output, const Tensor & self, co | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->max_unpool2d_out(output_, self_, indices_, output_size); | |
+ baseType->max_unpool2d_out(out_, self_, indices_, output_size); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -58779,13 +58780,13 @@ Tensor & VariableType::max_unpool2d_out(Tensor & output, const Tensor & self, co | |
AT_ASSERT(indices__storage_saved.value().is_alias_of(indices_.storage())); | |
if (indices__impl_saved) AT_ASSERT(indices__impl_saved == indices_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
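// Reviewer note: for ops with several tensor inputs the out= guard covers all
// of them in one shot, as in compute_requires_grad( self, indices ) above.
// indices is an integral tensor and cannot require grad in practice, but the
// generated guard is deliberately uniform across argument types.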
Tensor VariableType::max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
profiler::RecordFunction profiler("max_unpool3d", Function::peek_at_next_sequence_nr()); | |
@@ -58908,16 +58909,16 @@ Tensor & VariableType::max_unpool3d_backward_out(Tensor & grad_input, const Tens | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
+Tensor & VariableType::max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack(indices, "indices", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self, indices )) { | |
throw_error_out_requires_grad("max_unpool3d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("max_unpool3d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -58936,17 +58937,17 @@ Tensor & VariableType::max_unpool3d_out(Tensor & output, const Tensor & self, co | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("max_unpool3d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("max_unpool3d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -58958,12 +58959,12 @@ Tensor & VariableType::max_unpool3d_out(Tensor & output, const Tensor & self, co | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->max_unpool3d_out(output_, self_, indices_, output_size, stride, padding); | |
+ baseType->max_unpool3d_out(out_, self_, indices_, output_size, stride, padding); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -58971,13 +58972,13 @@ Tensor & VariableType::max_unpool3d_out(Tensor & output, const Tensor & self, co | |
AT_ASSERT(indices__storage_saved.value().is_alias_of(indices_.storage())); | |
if (indices__impl_saved) AT_ASSERT(indices__impl_saved == indices_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
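The wrapper above follows the shape every renamed out-variant in this diff shares: unpack `out` first, refuse gradient-requiring arguments (out= ops are not differentiable through this path), record `out` in the tracer under its new name, run the kernel under AutoNonVariableTypeMode, assert in debug builds that the kernel left out_'s storage and TensorImpl alone, then bump the version counter before returning `out`. A minimal caller-side sketch, assuming the matching at:: free function in Functions.h carries the same signature as the VariableType method above; the at::max_pool3d_with_indices and at::empty_like helpers are ordinary ATen calls, not part of this diff:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor x = at::randn({1, 1, 2, 4, 4});          // N, C, D, H, W
  at::Tensor pooled, indices;
  std::tie(pooled, indices) =
      at::max_pool3d_with_indices(x, /*kernel_size=*/{2, 2, 2});
  at::Tensor out = at::empty_like(x);                 // preallocated destination
  // The indices from the pooling call drive where values scatter back.
  at::max_unpool3d_out(out, pooled, indices, /*output_size=*/{2, 4, 4},
                       /*stride=*/{2, 2, 2}, /*padding=*/{0, 0, 0});
  return 0;
}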
Tensor VariableType::max_values(const Tensor & self, IntArrayRef dim, bool keepdim) const { | |
profiler::RecordFunction profiler("max_values", Function::peek_at_next_sequence_nr()); | |
@@ -61432,16 +61433,16 @@ Tensor & VariableType::mse_loss_backward_out(Tensor & grad_input, const Tensor & | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & VariableType::mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
profiler::RecordFunction profiler("mse_loss_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self, target )) { | |
throw_error_out_requires_grad("mse_loss"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("mse_loss"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -61458,17 +61459,17 @@ Tensor & VariableType::mse_loss_out(Tensor & output, const Tensor & self, const | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("mse_loss_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("mse_loss_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -61480,12 +61481,12 @@ Tensor & VariableType::mse_loss_out(Tensor & output, const Tensor & self, const | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->mse_loss_out(output_, self_, target_, reduction); | |
+ baseType->mse_loss_out(out_, self_, target_, reduction); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -61493,13 +61494,13 @@ Tensor & VariableType::mse_loss_out(Tensor & output, const Tensor & self, const | |
AT_ASSERT(target__storage_saved.value().is_alias_of(target_.storage())); | |
if (target__impl_saved) AT_ASSERT(target__impl_saved == target_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
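The two compute_requires_grad checks at the top of mse_loss_out are why out= calls only work on plain buffers: a differentiable self, target, or out trips throw_error_out_requires_grad before any computation runs. A minimal sketch under that restriction, assuming the at::mse_loss_out free function mirrors the signature above and that at::Reduction::Mean is visible through ATen/ATen.h:

#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({8});
  at::Tensor target = at::randn({8});
  at::Tensor out    = at::empty({});   // 0-dim tensor; mean reduction yields a scalar
  at::mse_loss_out(out, input, target, at::Reduction::Mean);
  // Had input or target required grad, the wrapper above would have thrown
  // instead of writing into out.
  return 0;
}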
Tensor VariableType::mul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul", Function::peek_at_next_sequence_nr()); | |
@@ -61923,9 +61924,9 @@ Tensor & VariableType::multi_margin_loss_backward_out(Tensor & grad_input, const | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
+Tensor & VariableType::multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) const { | |
profiler::RecordFunction profiler("multi_margin_loss_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 5); | |
@@ -61933,7 +61934,7 @@ Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & sel | |
if (compute_requires_grad( self, target, weight )) { | |
throw_error_out_requires_grad("multi_margin_loss"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("multi_margin_loss"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -61953,17 +61954,17 @@ Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & sel | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("multi_margin_loss_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("multi_margin_loss_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -61979,12 +61980,12 @@ Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & sel | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->multi_margin_loss_out(output_, self_, target_, p, margin, weight_, reduction); | |
+ baseType->multi_margin_loss_out(out_, self_, target_, p, margin, weight_, reduction); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
@@ -61995,13 +61996,13 @@ Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & sel | |
AT_ASSERT(weight__storage_saved.value().is_alias_of(weight_.storage())); | |
if (weight__impl_saved) AT_ASSERT(weight__impl_saved == weight_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss", Function::peek_at_next_sequence_nr()); | |
@@ -62227,7 +62228,7 @@ std::tuple<Tensor &,Tensor &> VariableType::multilabel_margin_loss_forward_out(T | |
} | |
return std::forward_as_tuple(output, is_target); | |
} | |
-Tensor & VariableType::multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
+Tensor & VariableType::multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_out", Function::peek_at_next_sequence_nr()); | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
@@ -62243,18 +62244,18 @@ Tensor & VariableType::multilabel_margin_loss_out(Tensor & output, const Tensor | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("multilabel_margin_loss_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("multilabel_margin_loss_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
- TypeDefault::multilabel_margin_loss_out(output, self, target, reduction); | |
+ TypeDefault::multilabel_margin_loss_out(out, self, target, reduction); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const { | |
profiler::RecordFunction profiler("multinomial", Function::peek_at_next_sequence_nr()); | |
@@ -63896,7 +63897,7 @@ std::tuple<Tensor &,Tensor &> VariableType::nll_loss2d_forward_out(Tensor & outp | |
} | |
return std::forward_as_tuple(output, total_weight); | |
} | |
-Tensor & VariableType::nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+Tensor & VariableType::nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
profiler::RecordFunction profiler("nll_loss2d_out", Function::peek_at_next_sequence_nr()); | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
@@ -63914,18 +63915,18 @@ Tensor & VariableType::nll_loss2d_out(Tensor & output, const Tensor & self, cons | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("nll_loss2d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("nll_loss2d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
- TypeDefault::nll_loss2d_out(output, self, target, weight, reduction, ignore_index); | |
+ TypeDefault::nll_loss2d_out(out, self, target, weight, reduction, ignore_index); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
Tensor VariableType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss_backward", Function::peek_at_next_sequence_nr()); | |
@@ -64279,7 +64280,7 @@ std::tuple<Tensor &,Tensor &> VariableType::nll_loss_forward_out(Tensor & output | |
} | |
return std::forward_as_tuple(output, total_weight); | |
} | |
-Tensor & VariableType::nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
+Tensor & VariableType::nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) const { | |
profiler::RecordFunction profiler("nll_loss_out", Function::peek_at_next_sequence_nr()); | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
@@ -64297,18 +64298,18 @@ Tensor & VariableType::nll_loss_out(Tensor & output, const Tensor & self, const | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("nll_loss_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("nll_loss_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
- TypeDefault::nll_loss_out(output, self, target, weight, reduction, ignore_index); | |
+ TypeDefault::nll_loss_out(out, self, target, weight, reduction, ignore_index); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
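Unlike the wrappers earlier in the diff, multilabel_margin_loss_out, nll_loss2d_out, and nll_loss_out do no unpacking or version bookkeeping of their own: they only record the tracer node under the new "out" name and forward to TypeDefault, which owns the real logic. A caller-side sketch for the nll_loss case, assuming the at::nll_loss_out free function matches the signature above; at::log_softmax and at::randint are ordinary ATen helpers, and -100 is the conventional ignore_index default:

#include <ATen/ATen.h>

int main() {
  at::Tensor logp   = at::log_softmax(at::randn({4, 10}), /*dim=*/1);
  at::Tensor target = at::randint(/*high=*/10, {4}, at::kLong);
  at::Tensor out    = at::empty({});
  // An undefined at::Tensor() for weight means "no per-class weights".
  at::nll_loss_out(out, logp, target, at::Tensor(), at::Reduction::Mean,
                   /*ignore_index=*/-100);
  return 0;
}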
Tensor VariableType::nonzero(const Tensor & self) const { | |
profiler::RecordFunction profiler("nonzero", Function::peek_at_next_sequence_nr()); | |
@@ -64920,15 +64921,15 @@ Tensor & VariableType::normal_(Tensor & self, double mean, double std, Generator | |
} | |
return self; | |
} | |
-Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const { | |
+Tensor & VariableType::normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& mean_ = unpack(mean, "mean", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( mean )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -64945,17 +64946,17 @@ Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, double s | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> mean__storage_saved = | |
mean_.has_storage() ? c10::optional<Storage>(mean_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> mean__impl_saved; | |
@@ -64963,33 +64964,33 @@ Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, double s | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->normal_out(output_, mean_, std, generator); | |
+ baseType->normal_out(out_, mean_, std, generator); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (mean__storage_saved.has_value()) | |
AT_ASSERT(mean__storage_saved.value().is_alias_of(mean_.storage())); | |
if (mean__impl_saved) AT_ASSERT(mean__impl_saved == mean_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
-Tensor & VariableType::normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const { | |
+Tensor & VariableType::normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& std_ = unpack(std, "std", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( std )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -65006,17 +65007,17 @@ Tensor & VariableType::normal_out(Tensor & output, double mean, const Tensor & s | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> std__storage_saved = | |
std_.has_storage() ? c10::optional<Storage>(std_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> std__impl_saved; | |
@@ -65024,34 +65025,34 @@ Tensor & VariableType::normal_out(Tensor & output, double mean, const Tensor & s | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->normal_out(output_, mean, std_, generator); | |
+ baseType->normal_out(out_, mean, std_, generator); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (std__storage_saved.has_value()) | |
AT_ASSERT(std__storage_saved.value().is_alias_of(std_.storage())); | |
if (std__impl_saved) AT_ASSERT(std__impl_saved == std_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
-Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
+Tensor & VariableType::normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& mean_ = unpack(mean, "mean", 1); | |
auto& std_ = unpack(std, "std", 2); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( mean, std )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("normal"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -65068,17 +65069,17 @@ Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, const Te | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("normal_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> mean__storage_saved = | |
mean_.has_storage() ? c10::optional<Storage>(mean_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> mean__impl_saved; | |
@@ -65090,12 +65091,12 @@ Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, const Te | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->normal_out(output_, mean_, std_, generator); | |
+ baseType->normal_out(out_, mean_, std_, generator); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (mean__storage_saved.has_value()) | |
AT_ASSERT(mean__storage_saved.value().is_alias_of(mean_.storage())); | |
if (mean__impl_saved) AT_ASSERT(mean__impl_saved == mean_.getIntrusivePtr()); | |
@@ -65103,13 +65104,13 @@ Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, const Te | |
AT_ASSERT(std__storage_saved.value().is_alias_of(std_.storage())); | |
if (std__impl_saved) AT_ASSERT(std__impl_saved == std_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
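The three hunks above are the three overloads of normal_out — Tensor mean with double std, double mean with Tensor std, and Tensor mean with Tensor std — all renamed the same way. A sketch exercising all three, assuming matching at::normal_out free functions in Functions.h; passing nullptr for the Generator* selects the default generator:

#include <ATen/ATen.h>

int main() {
  at::Tensor out  = at::empty({5});
  at::Tensor mean = at::zeros({5});
  at::Tensor std  = at::ones({5});
  at::normal_out(out, mean, /*std=*/1.0, /*generator=*/nullptr);  // Tensor mean, double std
  at::normal_out(out, /*mean=*/0.0, std, /*generator=*/nullptr);  // double mean, Tensor std
  at::normal_out(out, mean, std, /*generator=*/nullptr);          // Tensor mean, Tensor std
  return 0;
}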
Tensor VariableType::nuclear_norm(const Tensor & self, bool keepdim) const { | |
profiler::RecordFunction profiler("nuclear_norm", Function::peek_at_next_sequence_nr()); | |
@@ -68815,15 +68816,15 @@ Tensor & VariableType::reflection_pad1d_backward_out(Tensor & grad_input, const | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & VariableType::reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<Function> grad_fn; | |
if (compute_requires_grad( self )) { | |
throw_error_out_requires_grad("reflection_pad1d"); | |
} | |
- if (compute_requires_grad( output )) { | |
+ if (compute_requires_grad( out )) { | |
throw_error_out_requires_grad("reflection_pad1d"); | |
} | |
torch::jit::Node* node = nullptr; | |
@@ -68839,17 +68840,17 @@ Tensor & VariableType::reflection_pad1d_out(Tensor & output, const Tensor & self | |
if (tracer_state->force_outplace) { | |
} else { | |
- jit::tracer::addInputs(node, "output", output); | |
+ jit::tracer::addInputs(node, "out", out); | |
} | |
tracer_state->graph->insertNode(node); | |
- jit::tracer::ensureUniqueIfOutOfPlaced("reflection_pad1d_out", output); | |
+ jit::tracer::ensureUniqueIfOutOfPlaced("reflection_pad1d_out", out); | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> output__storage_saved = | |
- output_.has_storage() ? c10::optional<Storage>(output_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> output__impl_saved; | |
- if (output_.defined()) output__impl_saved = output_.getIntrusivePtr(); | |
+ c10::optional<Storage> out__storage_saved = | |
+ out_.has_storage() ? c10::optional<Storage>(out_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> out__impl_saved; | |
+ if (out_.defined()) out__impl_saved = out_.getIntrusivePtr(); | |
c10::optional<Storage> self__storage_saved = | |
self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
@@ -68857,23 +68858,23 @@ Tensor & VariableType::reflection_pad1d_out(Tensor & output, const Tensor & self | |
#endif | |
{ | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- baseType->reflection_pad1d_out(output_, self_, padding); | |
+ baseType->reflection_pad1d_out(out_, self_, padding); | |
} | |
#ifndef NDEBUG | |
- if (output__storage_saved.has_value()) | |
- AT_ASSERT(output__storage_saved.value().is_alias_of(output_.storage())); | |
- if (output__impl_saved) AT_ASSERT(output__impl_saved == output_.getIntrusivePtr()); | |
+ if (out__storage_saved.has_value()) | |
+ AT_ASSERT(out__storage_saved.value().is_alias_of(out_.storage())); | |
+ if (out__impl_saved) AT_ASSERT(out__impl_saved == out_.getIntrusivePtr()); | |
if (self__storage_saved.has_value()) | |
AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
#endif | |
- increment_version(output); | |
- rebase_history(flatten_tensor_args( output ), grad_fn); | |
+ increment_version(out); | |
+ rebase_history(flatten_tensor_args( out ), grad_fn); | |
if (tracer_state) { | |
jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, output); | |
+ jit::tracer::addOutput(node, out); | |
} | |
- return output; | |
+ return out; | |
} | |
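reflection_pad1d_out takes its IntArrayRef padding as {left, right}, so the output width is the input width plus both pads (each pad must be smaller than the input width). A shape-level sketch, assuming the at::reflection_pad1d_out free function matches the signature above:

#include <ATen/ATen.h>

int main() {
  at::Tensor x   = at::arange(6, at::kFloat).reshape({1, 1, 6});  // N, C, W
  at::Tensor out = at::empty({1, 1, 10});
  at::reflection_pad1d_out(out, x, /*padding=*/{2, 2});
  // out is 1x1x10: two reflected samples on each side of the width-6 input.
  return 0;
}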
Tensor VariableType::reflection_pad2d(const Tensor & self, IntArrayRef padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d", Function::peek_at_next_sequence_nr()); | |
@@ -69047,15 +69048,15 @@ Tensor & VariableType::reflection_pad2d_backward_out(Tensor & grad_input, const | |
} | |
return grad_input; | |
} | |
-Tensor & VariableType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntArrayRef padding) const { | |
+Tensor & VariableType::reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_out", Function::peek_at_next_sequence_nr()); | |
- auto& output_ = unpack(output, "output", 0); | |
+ auto& out_ = unpack(out, "out", 0); | |
a |