@cpuhrsch
Created March 3, 2019 02:01
diff --git a/build/aten/src/ATen/Declarations.yaml b/build/aten/src/ATen/Declarations.yaml
index 31207af18..c36fd7cd5 100644
--- a/build/aten/src/ATen/Declarations.yaml
+++ b/build/aten/src/ATen/Declarations.yaml
@@ -23590,7 +23590,7 @@
with_gil: false
deprecated: false
- name: _copy_same_type_
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_copy_same_type_(Tensor(a!) self, Tensor src) -> void
method_prefix_derived: ''
arguments:
@@ -23846,7 +23846,7 @@
with_gil: false
deprecated: false
- name: cudnn_affine_grid_generator
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int
H, int W) -> Tensor grid
method_prefix_derived: ''
@@ -23894,7 +23894,7 @@
with_gil: false
deprecated: false
- name: cudnn_affine_grid_generator_backward
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int
C, int H, int W) -> Tensor grad_theta
method_prefix_derived: ''
@@ -24705,7 +24705,7 @@
with_gil: false
deprecated: false
- name: cudnn_grid_sampler
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
method_prefix_derived: ''
arguments:
@@ -28032,9 +28032,9 @@
with_gil: false
deprecated: false
- name: gesv_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::gesv(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!)
- lu) ->(Tensor(a!), Tensor(b!))
+ lu) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -28457,7 +28457,7 @@
with_gil: false
deprecated: false
- name: _cufft_set_plan_cache_max_size
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_cufft_set_plan_cache_max_size(int max_size) -> void
method_prefix_derived: ''
arguments:
@@ -28483,7 +28483,7 @@
with_gil: false
deprecated: false
- name: _cufft_clear_plan_cache
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_cufft_clear_plan_cache() -> void
method_prefix_derived: ''
arguments: []
@@ -29238,9 +29238,9 @@
with_gil: false
deprecated: false
- name: kthvalue_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False,
- *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -30608,9 +30608,9 @@
with_gil: false
deprecated: false
- name: max_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- max, Tensor(b!) max_values) ->(Tensor(a!) values, Tensor(b!) indices)
+ max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -31326,9 +31326,9 @@
with_gil: false
deprecated: false
- name: median_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::median(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -31431,9 +31431,9 @@
with_gil: false
deprecated: false
- name: min_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- min, Tensor(b!) min_indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -32670,9 +32670,9 @@
with_gil: false
deprecated: false
- name: mode_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::mode(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -34135,7 +34135,7 @@
with_gil: false
deprecated: false
- name: rand_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::rand(int[] size, *, Generator? generator, Tensor(a!) out) ->
Tensor(a!)
method_prefix_derived: ''
@@ -34451,7 +34451,7 @@
with_gil: false
deprecated: false
- name: randint_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::randint(int high, int[] size, *, Generator? generator, Tensor(a!)
out) -> Tensor(a!)
method_prefix_derived: ''
@@ -34542,7 +34542,7 @@
with_gil: false
deprecated: false
- name: randint_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::randint(int low, int high, int[] size, *, Generator? generator,
Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
@@ -34848,7 +34848,7 @@
with_gil: false
deprecated: false
- name: randn_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::randn(int[] size, *, Generator? generator, Tensor(a!) out)
-> Tensor(a!)
method_prefix_derived: ''
@@ -35054,7 +35054,7 @@
with_gil: false
deprecated: false
- name: randperm_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::randperm(int n, *, Generator? generator, Tensor(a!) out) ->
Tensor(a!)
method_prefix_derived: ''
@@ -36604,7 +36604,7 @@
with_gil: false
deprecated: false
- name: _sparse_dense_add_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_sparse_dense_add(Tensor self, SparseTensorRef other, *, Scalar
alpha=1, Tensor(a!) out) -> Tensor(a!)
method_prefix_derived: ''
@@ -41931,7 +41931,7 @@
with_gil: false
deprecated: false
- name: sparse_mask
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::sparse_mask(Tensor self, SparseTensorRef mask) -> Tensor
method_prefix_derived: ''
arguments:
@@ -44454,7 +44454,7 @@
with_gil: false
deprecated: false
- name: data_ptr
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::data_ptr(Tensor self) -> void*
method_prefix_derived: ''
arguments:
@@ -44480,7 +44480,7 @@
with_gil: false
deprecated: false
- name: set_
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::set_(Tensor(a!) self, Storage source) -> Tensor(a!)
method_prefix_derived: ''
arguments:
@@ -44511,7 +44511,7 @@
with_gil: false
deprecated: false
- name: set_
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::set_(Tensor(a!) self, Storage source, int storage_offset, int[]
size, int[] stride=[]) -> Tensor(a!)
method_prefix_derived: ''
@@ -49310,7 +49310,7 @@
with_gil: false
deprecated: false
- name: _gather_sparse_backward
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_gather_sparse_backward(Tensor self, int dim, Tensor index,
Tensor grad) -> Tensor
method_prefix_derived: ''
@@ -49546,9 +49546,9 @@
with_gil: false
deprecated: false
- name: gels_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::gels(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49632,9 +49632,9 @@
with_gil: false
deprecated: false
- name: trtrs_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::trtrs(Tensor self, Tensor A, bool upper=True, bool transpose=False,
- bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) ->(Tensor(a!), Tensor(b!))
+ bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49755,9 +49755,9 @@
with_gil: false
deprecated: false
- name: symeig_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True,
- *, Tensor(a!) e, Tensor(b!) V) ->(Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49862,9 +49862,9 @@
with_gil: false
deprecated: false
- name: eig_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::eig(Tensor self, bool eigenvectors=False, *, Tensor(a!) e,
- Tensor(b!) v) ->(Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49957,9 +49957,9 @@
with_gil: false
deprecated: false
- name: svd_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::svd(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!)
- U, Tensor(b!) S, Tensor(c!) V) ->(Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
+ U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50381,9 +50381,9 @@
with_gil: false
deprecated: false
- name: pstrf_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::pstrf(Tensor self, bool upper=True, Scalar tol=-1, *, Tensor(a!)
- u, Tensor(b!) pivot) ->(Tensor(a!) u, Tensor(b!) pivot)
+ u, Tensor(b!) pivot) -> (Tensor(a!) u, Tensor(b!) pivot)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50488,8 +50488,8 @@
with_gil: false
deprecated: false
- name: qr_out
- matches_jit_signature: false
- schema_string: aten::qr(Tensor self, *, Tensor(a!) Q, Tensor(b!) R) ->(Tensor(a!)
+ matches_jit_signature: true
+ schema_string: aten::qr(Tensor self, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!)
Q, Tensor(b!) R)
method_prefix_derived: ''
arguments:
@@ -50570,8 +50570,8 @@
with_gil: false
deprecated: false
- name: geqrf_out
- matches_jit_signature: false
- schema_string: aten::geqrf(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) ->(Tensor(a!)
+ matches_jit_signature: true
+ schema_string: aten::geqrf(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!)
a, Tensor(b!) tau)
method_prefix_derived: ''
arguments:
@@ -50830,9 +50830,9 @@
with_gil: false
deprecated: false
- name: btrifact_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::btrifact(Tensor self, *, bool pivot=True, Tensor(a!) A_LU,
- Tensor(b!) pivots) ->(Tensor(a!), Tensor(b!))
+ Tensor(b!) pivots) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50920,9 +50920,9 @@
with_gil: false
deprecated: false
- name: btrifact_with_info_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::btrifact_with_info(Tensor self, *, bool pivot=True, Tensor(a!)
- A_LU, Tensor(b!) pivots, Tensor(c!) info) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ A_LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -52504,9 +52504,9 @@
with_gil: false
deprecated: false
- name: sort_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::sort(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -52645,9 +52645,9 @@
with_gil: false
deprecated: false
- name: topk_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool
- sorted=True, *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -54374,9 +54374,9 @@
with_gil: false
deprecated: false
- name: multilabel_margin_loss_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::multilabel_margin_loss_forward(Tensor self, Tensor target,
- int reduction, *, Tensor(a!) output, Tensor(b!) is_target) ->(Tensor(a!), Tensor(b!))
+ int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -54683,10 +54683,10 @@
with_gil: false
deprecated: false
- name: nll_loss_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight,
int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -55033,10 +55033,10 @@
with_gil: false
deprecated: false
- name: nll_loss2d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight,
int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -56491,9 +56491,9 @@
with_gil: false
deprecated: false
- name: log_sigmoid_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::log_sigmoid_forward(Tensor self, *, Tensor(a!) output, Tensor(b!)
- buffer) ->(Tensor(a!), Tensor(b!))
+ buffer) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -57568,9 +57568,9 @@
with_gil: false
deprecated: false
- name: adaptive_max_pool2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::adaptive_max_pool2d(Tensor self, int[2] output_size, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -57738,9 +57738,9 @@
with_gil: false
deprecated: false
- name: adaptive_max_pool3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::adaptive_max_pool3d(Tensor self, int[3] output_size, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58422,10 +58422,10 @@
with_gil: false
deprecated: false
- name: fractional_max_pool2d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2]
output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58640,10 +58640,10 @@
with_gil: false
deprecated: false
- name: fractional_max_pool3d_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3]
output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58858,10 +58858,10 @@
with_gil: false
deprecated: false
- name: max_pool2d_with_indices_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2]
stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -59144,10 +59144,10 @@
with_gil: false
deprecated: false
- name: max_pool3d_with_indices_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3]
stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62113,11 +62113,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose2d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose2d_forward(Tensor self, Tensor weight, int[2]
kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] output_padding,
- int[2] dilation, *, Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->(Tensor(a!),
- Tensor(b!), Tensor(c!))
+ int[2] dilation, *, Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->
+ (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62289,11 +62289,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose2d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding,
int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!)
- grad_weight, Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62645,11 +62645,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose3d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose3d_forward(Tensor self, Tensor weight, int[3]
kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] output_padding,
int[3] dilation, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62821,11 +62821,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_transpose3d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_transpose3d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding,
int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input,
- Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63147,10 +63147,10 @@
with_gil: false
deprecated: false
- name: thnn_conv2d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size,
Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!)
- finput, Tensor(c!) fgrad_input) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63298,11 +63298,11 @@
with_gil: false
deprecated: false
- name: thnn_conv2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv2d_backward(Tensor grad_output, Tensor self, Tensor
weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor
fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63747,10 +63747,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_depthwise2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_depthwise2d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation,
- *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) ->(Tensor(a!), Tensor(b!))
+ *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64024,10 +64024,10 @@
with_gil: false
deprecated: false
- name: thnn_conv3d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size,
Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!)
- finput, Tensor(c!) fgrad_input) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64175,11 +64175,11 @@
with_gil: false
deprecated: false
- name: thnn_conv3d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv3d_backward(Tensor grad_output, Tensor self, Tensor
weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor
fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64492,10 +64492,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated2d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated2d_forward(Tensor self, Tensor weight, int[2]
kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *,
- Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->(Tensor(a!), Tensor(b!),
+ Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) -> (Tensor(a!), Tensor(b!),
Tensor(c!))
method_prefix_derived: ''
arguments:
@@ -64656,11 +64656,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated2d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated2d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation,
Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight,
- Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64985,10 +64985,10 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated3d_forward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated3d_forward(Tensor self, Tensor weight, int[3]
kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation, *,
- Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->(Tensor(a!), Tensor(b!),
+ Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) -> (Tensor(a!), Tensor(b!),
Tensor(c!))
method_prefix_derived: ''
arguments:
@@ -65149,11 +65149,11 @@
with_gil: false
deprecated: false
- name: thnn_conv_dilated3d_backward_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::thnn_conv_dilated3d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[] dilation,
Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight,
- Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
diff --git a/torch/csrc/jit/generated/register_aten_ops_0.cpp b/torch/csrc/jit/generated/register_aten_ops_0.cpp
index 613929dcd..b400c3fa6 100644
--- a/torch/csrc/jit/generated/register_aten_ops_0.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_0.cpp
@@ -4050,7 +4050,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor",
+ "aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta",
[](Stack & stack) {
autograd::profiler::RecordFunction record("cudnn_affine_grid_generator_backward");
diff --git a/torch/csrc/jit/generated/register_aten_ops_1.cpp b/torch/csrc/jit/generated/register_aten_ops_1.cpp
index d0e817587..160bebb00 100644
--- a/torch/csrc/jit/generated/register_aten_ops_1.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_1.cpp
@@ -3998,7 +3998,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor",
+ "aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid",
[](Stack & stack) {
autograd::profiler::RecordFunction record("cudnn_affine_grid_generator");
diff --git a/torch/csrc/jit/generated/register_aten_ops_2.cpp b/torch/csrc/jit/generated/register_aten_ops_2.cpp
index 332899d25..713df8a54 100644
--- a/torch/csrc/jit/generated/register_aten_ops_2.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_2.cpp
@@ -3972,7 +3972,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor",
+ "aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output",
[](Stack & stack) {
autograd::profiler::RecordFunction record("cudnn_grid_sampler");