@apaszke
Created September 7, 2018 01:46
LOSS FUNCTIONS
"aten::binary_cross_entropy(Tensor self, Tensor target, Tensor weight=None, int reduction=ElementwiseMean) -> Tensor",
"aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=ElementwiseMean) -> Tensor",
"aten::ctc_loss(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=ElementwiseMean) -> Tensor",
"aten::ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=ElementwiseMean) -> Tensor",
"aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=ElementwiseMean) -> Tensor",
"aten::kl_div(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::l1_loss(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=ElementwiseMean) -> Tensor",
"aten::mse_loss(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor weight=None, int reduction=ElementwiseMean) -> Tensor",
"aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::nll_loss(Tensor self, Tensor target, Tensor weight=None, int reduction=ElementwiseMean, int ignore_index=-100) -> Tensor",
"aten::nll_loss2d(Tensor self, Tensor target, Tensor weight=None, int reduction=ElementwiseMean, int ignore_index=-100) -> Tensor",
"aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::soft_margin_loss(Tensor self, Tensor target, int reduction=ElementwiseMean) -> Tensor",
"aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=ElementwiseMean) -> Tensor",
FACTORY FUNCTIONS
"aten::randperm(int n, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::range(Scalar start, Scalar end, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::range(Scalar start, Scalar end, Scalar step, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::linspace(Scalar start, Scalar end, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::logspace(Scalar start, Scalar end, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::logspace(Scalar start, Scalar end, int steps, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::eye(int n, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::eye(int n, int m, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::arange(Scalar end, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::arange(Scalar start, Scalar end, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::arange(Scalar start, Scalar end, Scalar step, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
POSSIBLY
"aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator generator=None) -> Tensor",
"aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor, Tensor)",
"aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)",
"aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)",
"aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)",
"aten::flatten(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor",
"aten::diag(Tensor self, int diagonal=0) -> Tensor",
"aten::gru(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
"aten::rnn_relu(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
"aten::rnn_tanh(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)",
"aten::gru(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
"aten::rnn_relu(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
"aten::rnn_tanh(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)",
"aten::lstm(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)",
"aten::lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)",
LIST TYPES
ListConstruct + ListUnpack
"aten::index(Tensor self, Tensor[] indices) -> Tensor",
"aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]",
"aten::chunk(Tensor self, int chunks, int dim=0) -> Tensor[]",
"aten::split(Tensor self, int split_size, int dim=0) -> Tensor[]",
"aten::split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]",
"aten::stack(Tensor[] tensors, int dim=0) -> Tensor",
"aten::unbind(Tensor self, int dim=0) -> Tensor[]",
ALL OTHERS
"aten::bartlett_window(int window_length, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::bartlett_window(int window_length, bool periodic, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor",
"aten::blackman_window(int window_length, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::blackman_window(int window_length, bool periodic, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::cross(Tensor self, Tensor other, int dim=-1) -> Tensor",
"aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor, Tensor)",
"aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor",
"aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)",
"aten::gels(Tensor self, Tensor A) -> (Tensor, Tensor)",
"aten::geqrf(Tensor self) -> (Tensor, Tensor)",
"aten::ger(Tensor self, Tensor vec2) -> Tensor",
"aten::gesv(Tensor self, Tensor A) -> (Tensor, Tensor)",
"aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor",
"aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor",
"aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor",
"aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor",
"aten::hamming_window(int window_length, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::hamming_window(int window_length, bool periodic, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::hamming_window(int window_length, bool periodic, float alpha, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::hamming_window(int window_length, bool periodic, float alpha, float beta, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::hann_window(int window_length, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::hann_window(int window_length, bool periodic, *, ScalarType dtype=float, Layout layout=strided, Device device=[cpu, -1]) -> Tensor",
"aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor",
"aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor",
"aten::ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor",
"aten::irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor",
"aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor",
"aten::linear(Tensor input, Tensor weight, Tensor bias=None) -> Tensor",
"aten::matrix_rank(Tensor self, bool symmetric=False) -> Tensor",
"aten::matrix_rank(Tensor self, float tol, bool symmetric=False) -> Tensor",
"aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
"aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
"aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
"aten::meshgrid(Tensor[] tensors) -> Tensor[]",
"aten::orgqr(Tensor self, Tensor input2) -> Tensor",
"aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor",
"aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor",
"aten::pdist(Tensor self, float p=2) -> Tensor",
"aten::potrf(Tensor self, bool upper=True) -> Tensor",
"aten::potri(Tensor self, bool upper=True) -> Tensor",
"aten::potrs(Tensor self, Tensor input2, bool upper=True) -> Tensor",
"aten::pstrf(Tensor self, bool upper=True, Scalar tol=-1) -> (Tensor, Tensor)",
"aten::qr(Tensor self) -> (Tensor, Tensor)",
"aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor",
"aten::rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor",
"aten::slogdet(Tensor self) -> (Tensor, Tensor)",
"aten::smm(Tensor self, Tensor mat2) -> Tensor",
"aten::squeeze(Tensor self) -> Tensor",
"aten::squeeze(Tensor self, int dim) -> Tensor",
"aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
"aten::stft(Tensor self, int n_fft, int hop_length, int win_length, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor",
"aten::svd(Tensor self, bool some=True) -> (Tensor, Tensor, Tensor)",
"aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor, Tensor)",
"aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor",
"aten::to_dense(Tensor self) -> Tensor",
"aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor, Tensor)",
"aten::trtrs(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor, Tensor)",