Skip to content

Instantly share code, notes, and snippets.

@cpuhrsch
Created March 1, 2019 06:09
Show Gist options
  • Save cpuhrsch/16a309ea3caad0890124cc3eed974187 to your computer and use it in GitHub Desktop.
diff --git a/build/aten/src/ATen/Declarations.yaml b/build/aten/src/ATen/Declarations.yaml
index 7ca5b4659..e3281b884 100644
--- a/build/aten/src/ATen/Declarations.yaml
+++ b/build/aten/src/ATen/Declarations.yaml
@@ -18798,9 +18798,9 @@
with_gil: false
deprecated: false
- name: _cudnn_init_dropout_state
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed,
- TensorOptions options) -> Tensor
+ *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -18819,8 +18819,10 @@
name: dropout_seed
type: int64_t
- annotation: null
+ default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -20373,8 +20375,9 @@
with_gil: false
deprecated: false
- name: arange
- matches_jit_signature: false
- schema_string: aten::arange(Scalar end, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::arange(Scalar end, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -20386,6 +20389,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -20405,9 +20409,9 @@
with_gil: false
deprecated: false
- name: arange
- matches_jit_signature: false
- schema_string: aten::arange(Scalar start, Scalar end, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::arange(Scalar start, Scalar end, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -20424,6 +20428,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -20443,9 +20448,9 @@
with_gil: false
deprecated: false
- name: arange
- matches_jit_signature: false
- schema_string: aten::arange(Scalar start, Scalar end, Scalar step, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::arange(Scalar start, Scalar end, Scalar step, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -20467,6 +20472,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -21278,9 +21284,9 @@
with_gil: false
deprecated: false
- name: bartlett_window
- matches_jit_signature: false
- schema_string: aten::bartlett_window(int window_length, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::bartlett_window(int window_length, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -21292,6 +21298,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -21311,9 +21318,9 @@
with_gil: false
deprecated: false
- name: bartlett_window
- matches_jit_signature: false
- schema_string: aten::bartlett_window(int window_length, bool periodic, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::bartlett_window(int window_length, bool periodic, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -21330,6 +21337,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -21494,10 +21502,10 @@
with_gil: false
deprecated: false
- name: _batch_norm_impl_index_backward
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_batch_norm_impl_index_backward(int impl_index, Tensor input,
Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var,
- Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, std::array<bool,3>
+ Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3]
output_mask) -> (Tensor, Tensor, Tensor)
method_prefix_derived: ''
arguments:
@@ -21956,9 +21964,9 @@
with_gil: false
deprecated: false
- name: blackman_window
- matches_jit_signature: false
- schema_string: aten::blackman_window(int window_length, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::blackman_window(int window_length, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -21970,6 +21978,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -21989,9 +21998,9 @@
with_gil: false
deprecated: false
- name: blackman_window
- matches_jit_signature: false
- schema_string: aten::blackman_window(int window_length, bool periodic, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::blackman_window(int window_length, bool periodic, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -22008,6 +22017,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -23680,7 +23690,7 @@
with_gil: false
deprecated: false
- name: _copy_same_type_
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_copy_same_type_(Tensor(a!) self, Tensor src) -> void
method_prefix_derived: ''
arguments:
@@ -26418,8 +26428,9 @@
with_gil: false
deprecated: false
- name: empty
- matches_jit_signature: false
- schema_string: aten::empty(int[] size, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::empty(int[] size, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -26431,6 +26442,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -26541,8 +26553,9 @@
with_gil: false
deprecated: false
- name: empty_like
- matches_jit_signature: false
- schema_string: aten::empty_like(Tensor self, *, TensorOptions options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::empty_like(Tensor self, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -26573,9 +26586,9 @@
with_gil: false
deprecated: false
- name: empty_strided
- matches_jit_signature: false
- schema_string: aten::empty_strided(int[] size, int[] stride, *, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::empty_strided(int[] size, int[] stride, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27034,8 +27047,9 @@
with_gil: false
deprecated: false
- name: eye
- matches_jit_signature: false
- schema_string: aten::eye(int n, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::eye(int n, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27047,6 +27061,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27066,8 +27081,9 @@
with_gil: false
deprecated: false
- name: eye
- matches_jit_signature: false
- schema_string: aten::eye(int n, int m, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::eye(int n, int m, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27084,6 +27100,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27367,9 +27384,9 @@
with_gil: false
deprecated: false
- name: full
- matches_jit_signature: false
- schema_string: aten::full(int[] size, Scalar fill_value, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::full(int[] size, Scalar fill_value, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27386,6 +27403,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27475,9 +27493,9 @@
with_gil: false
deprecated: false
- name: full_like
- matches_jit_signature: false
- schema_string: aten::full_like(Tensor self, Scalar fill_value, *, TensorOptions
- options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::full_like(Tensor self, Scalar fill_value, *, ScalarType dtype,
+ Layout layout, Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27739,9 +27757,9 @@
with_gil: false
deprecated: false
- name: hann_window
- matches_jit_signature: false
- schema_string: aten::hann_window(int window_length, TensorOptions options=[]) ->
- Tensor
+ matches_jit_signature: true
+ schema_string: aten::hann_window(int window_length, *, ScalarType dtype=float, Layout
+ layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27753,6 +27771,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27772,9 +27791,9 @@
with_gil: false
deprecated: false
- name: hann_window
- matches_jit_signature: false
- schema_string: aten::hann_window(int window_length, bool periodic, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::hann_window(int window_length, bool periodic, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27791,6 +27810,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27810,9 +27830,9 @@
with_gil: false
deprecated: false
- name: hamming_window
- matches_jit_signature: false
- schema_string: aten::hamming_window(int window_length, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::hamming_window(int window_length, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27824,6 +27844,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27843,9 +27864,9 @@
with_gil: false
deprecated: false
- name: hamming_window
- matches_jit_signature: false
- schema_string: aten::hamming_window(int window_length, bool periodic, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::hamming_window(int window_length, bool periodic, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27862,6 +27883,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27881,9 +27903,9 @@
with_gil: false
deprecated: false
- name: hamming_window
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::hamming_window(int window_length, bool periodic, float alpha,
- TensorOptions options=[]) -> Tensor
+ *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27905,6 +27927,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -27924,9 +27947,10 @@
with_gil: false
deprecated: false
- name: hamming_window
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::hamming_window(int window_length, bool periodic, float alpha,
- float beta, TensorOptions options=[]) -> Tensor
+ float beta, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\")
+ -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -27953,6 +27977,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -28122,9 +28147,9 @@
with_gil: false
deprecated: false
- name: gesv_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::gesv(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!)
- lu) ->(Tensor(a!), Tensor(b!))
+ lu) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -28547,7 +28572,7 @@
with_gil: false
deprecated: false
- name: _cufft_set_plan_cache_max_size
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_cufft_set_plan_cache_max_size(int max_size) -> void
method_prefix_derived: ''
arguments:
@@ -28573,7 +28598,7 @@
with_gil: false
deprecated: false
- name: _cufft_clear_plan_cache
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_cufft_clear_plan_cache() -> void
method_prefix_derived: ''
arguments: []
@@ -29328,9 +29353,9 @@
with_gil: false
deprecated: false
- name: kthvalue_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False,
- *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -29640,9 +29665,9 @@
with_gil: false
deprecated: false
- name: linspace
- matches_jit_signature: false
- schema_string: aten::linspace(Scalar start, Scalar end, int steps=100, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::linspace(Scalar start, Scalar end, int steps=100, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -29665,6 +29690,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -30109,9 +30135,9 @@
with_gil: false
deprecated: false
- name: logspace
- matches_jit_signature: false
- schema_string: aten::logspace(Scalar start, Scalar end, int steps=100, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::logspace(Scalar start, Scalar end, int steps=100, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -30134,6 +30160,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -30698,9 +30725,9 @@
with_gil: false
deprecated: false
- name: max_out
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- max, Tensor(b!) max_values) ->(Tensor(a!) values, Tensor(b!) indices)
+ max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -31418,7 +31445,7 @@
- name: median_out
matches_jit_signature: false
schema_string: aten::median(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -31523,7 +31550,7 @@
- name: min_out
matches_jit_signature: false
schema_string: aten::min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!)
- min, Tensor(b!) min_indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -32762,7 +32789,7 @@
- name: mode_out
matches_jit_signature: false
schema_string: aten::mode(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) indices)
+ values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -33602,8 +33629,9 @@
with_gil: false
deprecated: false
- name: ones
- matches_jit_signature: false
- schema_string: aten::ones(int[] size, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::ones(int[] size, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -33615,6 +33643,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -33694,8 +33723,9 @@
with_gil: false
deprecated: false
- name: ones_like
- matches_jit_signature: false
- schema_string: aten::ones_like(Tensor self, *, TensorOptions options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::ones_like(Tensor self, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34085,8 +34115,9 @@
with_gil: false
deprecated: false
- name: scalar_tensor
- matches_jit_signature: false
- schema_string: aten::scalar_tensor(Scalar s, *, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::scalar_tensor(Scalar s, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34118,8 +34149,9 @@
with_gil: false
deprecated: false
- name: rand
- matches_jit_signature: false
- schema_string: aten::rand(int[] size, *, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::rand(int[] size, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34152,8 +34184,8 @@
deprecated: false
- name: rand
matches_jit_signature: false
- schema_string: aten::rand(int[] size, *, Generator? generator, TensorOptions options=[])
- -> Tensor
+ schema_string: aten::rand(int[] size, *, Generator? generator, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34292,8 +34324,9 @@
with_gil: false
deprecated: false
- name: rand_like
- matches_jit_signature: false
- schema_string: aten::rand_like(Tensor self, *, TensorOptions options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::rand_like(Tensor self, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34324,9 +34357,9 @@
with_gil: false
deprecated: false
- name: randint
- matches_jit_signature: false
- schema_string: aten::randint(int high, int[] size, *, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randint(int high, int[] size, *, ScalarType dtype=float, Layout
+ layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34364,8 +34397,8 @@
deprecated: false
- name: randint
matches_jit_signature: false
- schema_string: aten::randint(int high, int[] size, *, Generator? generator, TensorOptions
- options=[]) -> Tensor
+ schema_string: aten::randint(int high, int[] size, *, Generator? generator, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34408,9 +34441,9 @@
with_gil: false
deprecated: false
- name: randint
- matches_jit_signature: false
- schema_string: aten::randint(int low, int high, int[] size, *, TensorOptions options=[])
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randint(int low, int high, int[] size, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34454,7 +34487,7 @@
- name: randint
matches_jit_signature: false
schema_string: aten::randint(int low, int high, int[] size, *, Generator? generator,
- TensorOptions options=[]) -> Tensor
+ ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34750,9 +34783,9 @@
with_gil: false
deprecated: false
- name: randint_like
- matches_jit_signature: false
- schema_string: aten::randint_like(Tensor self, int high, *, TensorOptions options)
- -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randint_like(Tensor self, int high, *, ScalarType dtype, Layout
+ layout, Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34788,9 +34821,9 @@
with_gil: false
deprecated: false
- name: randint_like
- matches_jit_signature: false
- schema_string: aten::randint_like(Tensor self, int low, int high, *, TensorOptions
- options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randint_like(Tensor self, int low, int high, *, ScalarType
+ dtype, Layout layout, Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34831,8 +34864,9 @@
with_gil: false
deprecated: false
- name: randn
- matches_jit_signature: false
- schema_string: aten::randn(int[] size, *, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randn(int[] size, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -34865,8 +34899,8 @@
deprecated: false
- name: randn
matches_jit_signature: false
- schema_string: aten::randn(int[] size, *, Generator? generator, TensorOptions options=[])
- -> Tensor
+ schema_string: aten::randn(int[] size, *, Generator? generator, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35005,8 +35039,9 @@
with_gil: false
deprecated: false
- name: randn_like
- matches_jit_signature: false
- schema_string: aten::randn_like(Tensor self, *, TensorOptions options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randn_like(Tensor self, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35037,8 +35072,9 @@
with_gil: false
deprecated: false
- name: randperm
- matches_jit_signature: false
- schema_string: aten::randperm(int n, *, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::randperm(int n, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35071,8 +35107,8 @@
deprecated: false
- name: randperm
matches_jit_signature: false
- schema_string: aten::randperm(int n, *, Generator? generator, TensorOptions options=[])
- -> Tensor
+ schema_string: aten::randperm(int n, *, Generator? generator, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35185,9 +35221,9 @@
with_gil: false
deprecated: false
- name: range
- matches_jit_signature: false
- schema_string: aten::range(Scalar start, Scalar end, Scalar step=1, TensorOptions
- options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::range(Scalar start, Scalar end, Scalar step=1, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35210,6 +35246,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -35229,9 +35266,9 @@
with_gil: false
deprecated: false
- name: range
- matches_jit_signature: false
- schema_string: aten::range(Scalar start, Scalar end, TensorOptions options=[]) ->
- Tensor
+ matches_jit_signature: true
+ schema_string: aten::range(Scalar start, Scalar end, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -35248,6 +35285,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -39842,8 +39880,9 @@
with_gil: false
deprecated: false
- name: zeros
- matches_jit_signature: false
- schema_string: aten::zeros(int[] size, TensorOptions options=[]) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::zeros(int[] size, *, ScalarType dtype=float, Layout layout=strided,
+ Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -39855,6 +39894,7 @@
default: '{}'
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -39934,8 +39974,9 @@
with_gil: false
deprecated: false
- name: zeros_like
- matches_jit_signature: false
- schema_string: aten::zeros_like(Tensor self, *, TensorOptions options) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::zeros_like(Tensor self, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41680,9 +41721,9 @@
with_gil: false
deprecated: false
- name: sparse_coo_tensor
- matches_jit_signature: false
- schema_string: aten::sparse_coo_tensor(int[] size, *, TensorOptions options) ->
- Tensor
+ matches_jit_signature: true
+ schema_string: aten::sparse_coo_tensor(int[] size, *, ScalarType dtype, Layout layout,
+ Device device) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41714,8 +41755,8 @@
deprecated: false
- name: sparse_coo_tensor
matches_jit_signature: false
- schema_string: aten::sparse_coo_tensor(IndexTensor indices, Tensor values, *, TensorOptions
- options=[]) -> Tensor
+ schema_string: aten::sparse_coo_tensor(IndexTensor indices, Tensor values, *, ScalarType
+ dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41754,7 +41795,8 @@
- name: sparse_coo_tensor
matches_jit_signature: false
schema_string: aten::sparse_coo_tensor(IndexTensor indices, Tensor values, int[]
- size, *, TensorOptions options=[]) -> Tensor
+ size, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\")
+ -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41798,7 +41840,8 @@
- name: _sparse_coo_tensor_unsafe
matches_jit_signature: false
schema_string: aten::_sparse_coo_tensor_unsafe(IndexTensor indices, Tensor values,
- int[] size, *, TensorOptions options=[]) -> Tensor
+ int[] size, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\")
+ -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41840,9 +41883,10 @@
with_gil: false
deprecated: false
- name: _sparse_coo_tensor_with_dims
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim,
- int[] size, *, TensorOptions options) -> Tensor
+ int[] size, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\")
+ -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41861,6 +41905,7 @@
name: size
type: IntArrayRef
- annotation: null
+ default: '{}'
dynamic_type: TensorOptions
is_nullable: false
kwarg_only: true
@@ -41883,10 +41928,10 @@
with_gil: false
deprecated: false
- name: _sparse_coo_tensor_with_dims_and_tensors
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int
- dense_dim, int[] size, Tensor indices, Tensor values, *, TensorOptions options)
- -> Tensor
+ dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType dtype=float,
+ Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -41915,6 +41960,7 @@
name: values
type: const Tensor &
- annotation: null
+ default: '{}'
dynamic_type: TensorOptions
is_nullable: false
kwarg_only: true
@@ -42620,9 +42666,9 @@
with_gil: false
deprecated: false
- name: to
- matches_jit_signature: false
- schema_string: aten::to(Tensor self, TensorOptions options, bool non_blocking=False,
- bool copy=False) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::to(Tensor self, ScalarType dtype, Layout layout, Device device,
+ bool non_blocking=False, bool copy=False) -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -48050,9 +48096,9 @@
with_gil: false
deprecated: false
- name: tril_indices
- matches_jit_signature: false
- schema_string: aten::tril_indices(int row, int col, int offset=0, TensorOptions
- options=at::kLong) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::tril_indices(int row, int col, int offset=0, *, ScalarType
+ dtype=long, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -48075,6 +48121,7 @@
default: at::kLong
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -48094,9 +48141,9 @@
with_gil: false
deprecated: false
- name: triu_indices
- matches_jit_signature: false
- schema_string: aten::triu_indices(int row, int col, int offset=0, TensorOptions
- options=at::kLong) -> Tensor
+ matches_jit_signature: true
+ schema_string: aten::triu_indices(int row, int col, int offset=0, *, ScalarType
+ dtype=long, Layout layout=strided, Device device=\"cpu\") -> Tensor
method_prefix_derived: ''
arguments:
- annotation: null
@@ -48119,6 +48166,7 @@
default: at::kLong
dynamic_type: TensorOptions
is_nullable: false
+ kwarg_only: true
name: options
type: const TensorOptions &
method_of:
@@ -49400,7 +49448,7 @@
with_gil: false
deprecated: false
- name: _gather_sparse_backward
- matches_jit_signature: false
+ matches_jit_signature: true
schema_string: aten::_gather_sparse_backward(Tensor self, int dim, Tensor index,
Tensor grad) -> Tensor
method_prefix_derived: ''
@@ -49638,7 +49686,7 @@
- name: gels_out
matches_jit_signature: false
schema_string: aten::gels(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49724,7 +49772,7 @@
- name: trtrs_out
matches_jit_signature: false
schema_string: aten::trtrs(Tensor self, Tensor A, bool upper=True, bool transpose=False,
- bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) ->(Tensor(a!), Tensor(b!))
+ bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49847,7 +49895,7 @@
- name: symeig_out
matches_jit_signature: false
schema_string: aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True,
- *, Tensor(a!) e, Tensor(b!) V) ->(Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -49954,7 +50002,7 @@
- name: eig_out
matches_jit_signature: false
schema_string: aten::eig(Tensor self, bool eigenvectors=False, *, Tensor(a!) e,
- Tensor(b!) v) ->(Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50049,7 +50097,7 @@
- name: svd_out
matches_jit_signature: false
schema_string: aten::svd(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!)
- U, Tensor(b!) S, Tensor(c!) V) ->(Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
+ U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50473,7 +50521,7 @@
- name: pstrf_out
matches_jit_signature: false
schema_string: aten::pstrf(Tensor self, bool upper=True, Scalar tol=-1, *, Tensor(a!)
- u, Tensor(b!) pivot) ->(Tensor(a!) u, Tensor(b!) pivot)
+ u, Tensor(b!) pivot) -> (Tensor(a!) u, Tensor(b!) pivot)
method_prefix_derived: ''
arguments:
- allocate: true
@@ -50579,7 +50627,7 @@
deprecated: false
- name: qr_out
matches_jit_signature: false
- schema_string: aten::qr(Tensor self, *, Tensor(a!) Q, Tensor(b!) R) ->(Tensor(a!)
+ schema_string: aten::qr(Tensor self, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!)
Q, Tensor(b!) R)
method_prefix_derived: ''
arguments:
@@ -50661,7 +50709,7 @@
deprecated: false
- name: geqrf_out
matches_jit_signature: false
- schema_string: aten::geqrf(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) ->(Tensor(a!)
+ schema_string: aten::geqrf(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!)
a, Tensor(b!) tau)
method_prefix_derived: ''
arguments:
@@ -50922,7 +50970,7 @@
- name: btrifact_out
matches_jit_signature: false
schema_string: aten::btrifact(Tensor self, *, bool pivot=True, Tensor(a!) A_LU,
- Tensor(b!) pivots) ->(Tensor(a!), Tensor(b!))
+ Tensor(b!) pivots) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -51012,7 +51060,7 @@
- name: btrifact_with_info_out
matches_jit_signature: false
schema_string: aten::btrifact_with_info(Tensor self, *, bool pivot=True, Tensor(a!)
- A_LU, Tensor(b!) pivots, Tensor(c!) info) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ A_LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -52596,7 +52644,7 @@
- name: sort_out
matches_jit_signature: false
schema_string: aten::sort(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!)
- values, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -52737,7 +52785,7 @@
- name: topk_out
matches_jit_signature: false
schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool
- sorted=True, *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -54466,7 +54514,7 @@
- name: multilabel_margin_loss_forward_out
matches_jit_signature: false
schema_string: aten::multilabel_margin_loss_forward(Tensor self, Tensor target,
- int reduction, *, Tensor(a!) output, Tensor(b!) is_target) ->(Tensor(a!), Tensor(b!))
+ int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -54776,7 +54824,7 @@
matches_jit_signature: false
schema_string: aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight,
int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -55126,7 +55174,7 @@
matches_jit_signature: false
schema_string: aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight,
int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -56583,7 +56631,7 @@
- name: log_sigmoid_forward_out
matches_jit_signature: false
schema_string: aten::log_sigmoid_forward(Tensor self, *, Tensor(a!) output, Tensor(b!)
- buffer) ->(Tensor(a!), Tensor(b!))
+ buffer) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -57660,7 +57708,7 @@
- name: adaptive_max_pool2d_out
matches_jit_signature: false
schema_string: aten::adaptive_max_pool2d(Tensor self, int[2] output_size, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -57830,7 +57878,7 @@
- name: adaptive_max_pool3d_out
matches_jit_signature: false
schema_string: aten::adaptive_max_pool3d(Tensor self, int[3] output_size, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58515,7 +58563,7 @@
matches_jit_signature: false
schema_string: aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2]
output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58733,7 +58781,7 @@
matches_jit_signature: false
schema_string: aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3]
output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices)
- ->(Tensor(a!), Tensor(b!))
+ -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -58951,7 +58999,7 @@
matches_jit_signature: false
schema_string: aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2]
stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -59237,7 +59285,7 @@
matches_jit_signature: false
schema_string: aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3]
stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!)
- output, Tensor(b!) indices) ->(Tensor(a!), Tensor(b!))
+ output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62206,8 +62254,8 @@
matches_jit_signature: false
schema_string: aten::thnn_conv_transpose2d_forward(Tensor self, Tensor weight, int[2]
kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] output_padding,
- int[2] dilation, *, Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->(Tensor(a!),
- Tensor(b!), Tensor(c!))
+ int[2] dilation, *, Tensor(a!) output, Tensor(b!) columns, Tensor(c!) ones) ->
+ (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62383,7 +62431,7 @@
schema_string: aten::thnn_conv_transpose2d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding,
int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!)
- grad_weight, Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62739,7 +62787,7 @@
schema_string: aten::thnn_conv_transpose3d_forward(Tensor self, Tensor weight, int[3]
kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] output_padding,
int[3] dilation, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -62915,7 +62963,7 @@
schema_string: aten::thnn_conv_transpose3d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding,
int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input,
- Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63240,7 +63288,7 @@
matches_jit_signature: false
schema_string: aten::thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size,
Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!)
- finput, Tensor(c!) fgrad_input) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63392,7 +63440,7 @@
schema_string: aten::thnn_conv2d_backward(Tensor grad_output, Tensor self, Tensor
weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor
fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -63840,7 +63888,7 @@
matches_jit_signature: false
schema_string: aten::thnn_conv_depthwise2d_backward(Tensor grad_output, Tensor self,
Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation,
- *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) ->(Tensor(a!), Tensor(b!))
+ *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64117,7 +64165,7 @@
matches_jit_signature: false
schema_string: aten::thnn_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size,
Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!)
- finput, Tensor(c!) fgrad_input) ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
@@ -64269,7 +64317,7 @@
schema_string: aten::thnn_conv3d_backward(Tensor grad_output, Tensor self, Tensor
weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor
fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias)
- ->(Tensor(a!), Tensor(b!), Tensor(c!))
+ -> (Tensor(a!), Tensor(b!), Tensor(c!))
method_prefix_derived: ''
arguments:
- allocate: true
diff --git a/build/aten/src/ATen/Functions.h b/build/aten/src/ATen/Functions.h
index 36a1ea54e..e6beec41d 100644
--- a/build/aten/src/ATen/Functions.h
+++ b/build/aten/src/ATen/Functions.h
@@ -547,7 +547,7 @@ static inline std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs
static inline Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional);
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
static inline std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
-static inline Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
+static inline Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options={});
static inline std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, Generator * generator=nullptr);
static inline Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale);
static inline Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
@@ -1088,8 +1088,8 @@ static inline Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & o
static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, const TensorOptions & options={});
static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={});
static inline Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={});
-static inline Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options);
-static inline Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options);
+static inline Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options={});
+static inline Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options={});
static inline Tensor & hspmm_out(Tensor & out, const Tensor & mat1, const Tensor & mat2);
static inline Tensor hspmm(const Tensor & mat1, const Tensor & mat2);
static inline Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking=false);
diff --git a/build/aten/src/ATen/NativeFunctions.h b/build/aten/src/ATen/NativeFunctions.h
index ac6b19a44..4d9592241 100644
--- a/build/aten/src/ATen/NativeFunctions.h
+++ b/build/aten/src/ATen/NativeFunctions.h
@@ -76,7 +76,7 @@ CAFFE2_API std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, c
CAFFE2_API Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
CAFFE2_API std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
-CAFFE2_API Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
+CAFFE2_API Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options={});
CAFFE2_API std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor & self, double p, Generator * generator=nullptr);
CAFFE2_API Tensor masked_scale_cuda(const Tensor & self, const Tensor & mask, double scale);
CAFFE2_API Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
@@ -770,8 +770,8 @@ CAFFE2_API Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & opti
CAFFE2_API Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, const TensorOptions & options={});
CAFFE2_API Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={});
CAFFE2_API Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={});
-CAFFE2_API Tensor new_with_dims_sparse(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options);
-CAFFE2_API Tensor new_with_dims_and_tensor_sparse(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options);
+CAFFE2_API Tensor new_with_dims_sparse(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options={});
+CAFFE2_API Tensor new_with_dims_and_tensor_sparse(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options={});
CAFFE2_API Tensor & sparse_resize_(Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
CAFFE2_API Tensor & sparse_resize_and_clear_(Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
CAFFE2_API Tensor sparse_mask_cpu(const Tensor & self, SparseTensorRef mask);
diff --git a/torch/csrc/autograd/generated/variable_factories.h b/torch/csrc/autograd/generated/variable_factories.h
index 174192467..8544564f3 100644
--- a/torch/csrc/autograd/generated/variable_factories.h
+++ b/torch/csrc/autograd/generated/variable_factories.h
@@ -105,7 +105,7 @@ inline at::Tensor from_blob(
return torch::from_blob(data, sizes, /*deleter=*/[](void*) {}, options);
}
-inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const at::TensorOptions & options) {
+inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const at::TensorOptions & options = {}) {
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
@@ -1489,7 +1489,7 @@ inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at
}
return result;
}
-inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::TensorOptions & options) {
+inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::TensorOptions & options = {}) {
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
@@ -1515,7 +1515,7 @@ inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense
}
return result;
}
-inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, const at::TensorOptions & options) {
+inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, const at::TensorOptions & options = {}) {
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
diff --git a/torch/csrc/jit/generated/register_aten_ops_0.cpp b/torch/csrc/jit/generated/register_aten_ops_0.cpp
index 5a9c7d601..e90eaa749 100644
--- a/torch/csrc/jit/generated/register_aten_ops_0.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_0.cpp
@@ -4763,7 +4763,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("full_like");
@@ -6554,7 +6554,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::randn_like(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::randn_like(Tensor self, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("randn_like");
diff --git a/torch/csrc/jit/generated/register_aten_ops_1.cpp b/torch/csrc/jit/generated/register_aten_ops_1.cpp
index d0e817587..5c4aae09f 100644
--- a/torch/csrc/jit/generated/register_aten_ops_1.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_1.cpp
@@ -4391,7 +4391,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::empty_like(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::empty_like(Tensor self, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("empty_like");
@@ -6562,7 +6562,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::rand_like(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::rand_like(Tensor self, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("rand_like");
@@ -7846,7 +7846,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::tril_indices(int row, int col, int offset=0, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::tril_indices(int row, int col, int offset=0, *, ScalarType dtype=long, Layout layout=strided, Device device=\"cpu\") -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("tril_indices");
@@ -7877,7 +7877,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::triu_indices(int row, int col, int offset=0, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::triu_indices(int row, int col, int offset=0, *, ScalarType dtype=long, Layout layout=strided, Device device=\"cpu\") -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("triu_indices");
diff --git a/torch/csrc/jit/generated/register_aten_ops_2.cpp b/torch/csrc/jit/generated/register_aten_ops_2.cpp
index 332899d25..90e101a58 100644
--- a/torch/csrc/jit/generated/register_aten_ops_2.cpp
+++ b/torch/csrc/jit/generated/register_aten_ops_2.cpp
@@ -5952,7 +5952,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::ones_like(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::ones_like(Tensor self, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("ones_like");
@@ -6270,7 +6270,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::randint_like(Tensor self, int high, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::randint_like(Tensor self, int high, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("randint_like");
@@ -6287,7 +6287,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::randint_like(Tensor self, int low, int high, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::randint_like(Tensor self, int low, int high, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("randint_like");
@@ -6919,7 +6919,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::sparse_coo_tensor(int[] size, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::sparse_coo_tensor(int[] size, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("sparse_coo_tensor");
@@ -7525,7 +7525,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::to(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\", bool non_blocking=False, bool copy=False) -> Tensor",
+ "aten::to(Tensor self, ScalarType dtype, Layout layout, Device device, bool non_blocking=False, bool copy=False) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("to");
@@ -7886,7 +7886,7 @@ RegisterOperators reg({
}
),
Operator(
- "aten::zeros_like(Tensor self, *, ScalarType dtype=float, Layout layout=strided, Device device=\"cpu\") -> Tensor",
+ "aten::zeros_like(Tensor self, *, ScalarType dtype, Layout layout, Device device) -> Tensor",
[](Stack & stack) {
autograd::profiler::RecordFunction record("zeros_like");
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment