PyTorch API Level

PyTorch functions grouped by API level, with "yes" marking those whose symbolic is implemented (last active December 11, 2018).
Torch level 1

Function ("yes" = symbolic implemented; usage sketch after the list)
gather
equal
and
iand
or
ior
xor
ixor
lshift
ilshift
rshift
irshift
min
max
all
any
frac yes
dist
reciprocal yes
neg yes
atan2
pow
lerp
sign
fmod yes
remainder yes
addbmm
addcmul
addcdiv
multinomial
normal
tensor
_cast_byte
_cast_char
_cast_double
_cast_float
_cast_int
_cast_long
_cast_short
_cast_half
abs yes
acos yes
add yes
addmv
addr
allclose
arange
argmax
argmin
as_strided
asin yes
atan yes
baddbmm
bernoulli
bmm
broadcast_tensors
cat
ceil yes
chunk
clamp yes
contiguous
convolution
conv1d
conv2d
conv3d
conv_transpose1d
conv_transpose2d
conv_transpose3d
cos yes
cosh yes
div yes
dot
empty
resize_
empty_like
empty_strided
erf
erfc
exp yes
expm1 yes
expand
expand_as
flatten
fill_
floor yes
full
full_like
index
index_copy_
index_put
is_floating_point
is_complex
is_nonzero
is_same_size
is_signed
log yes
log10 yes
log1p yes
log2 yes
logsumexp
matmul
mean
mm yes
mul yes
mv
narrow_copy
narrow
ones
ones_like
pin_memory
rand
rand_like
randint
randint_like
randn
randn_like
randperm
range
repeat
reshape
reshape_as
round yes
rsqrt yes
select
sin yes
sinh yes
detach
size
slice
split
squeeze
stack
stride
sum
sqrt
std
prod
t yes
tan yes
tanh yes
tensordot
transpose
trunc yes
type_as yes
unsqueeze yes
var
view_as
zeros
zeros_like
norm
clone
resize_as_
zero_
sub yes
addmm yes
numel
unbind
to
storage_offset
set_
is_contiguous
is_set_to
masked_fill_
masked_scatter_
view
put_
index_add_
index_fill_
scatter_
scatter_add_
random_
uniform_
ne yes
eq yes
ge yes
le yes
gt yes
lt yes
take
index_select
masked_select
nonzero
is_tensor
is_storage
as_tensor
unique
isfinite
isinf
isnan
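If "Symbolic_implemented" refers to ONNX export symbolics, as the column name suggests (this is an assumption, not stated above), the quickest way to confirm that a level-1 op exports is to push a tiny module through torch.onnx.export: export fails with an explicit error when an op in the traced graph has no symbolic. The module and file names below are placeholders for illustration.

```python
import torch

class TinyLevel1(torch.nn.Module):
    # Uses only level-1 ops that are marked "yes" above (abs, add, exp).
    def forward(self, x):
        return torch.exp(torch.abs(x) + 1.0)

model = TinyLevel1()
dummy = torch.randn(2, 3)
# Raises if any op in the traced graph lacks a symbolic implementation.
torch.onnx.export(model, dummy, "tiny_level1.onnx")
```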

Torch level 2

Function ("yes" = symbolic implemented; usage sketch after the list)
median
sort
topk
gels
trtrs
symeig
eig
svd
potrf
potrs
potri
pstrf
qr
geqrf
orgqr
ormqr
btrifact
btrifact_with_info
btrisolve
cumsum
linspace
logspace
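The level-2 entries are mostly sorting/selection reductions and dense linear algebra. A minimal sketch of a few of them with current torch calls; the shapes are arbitrary and nothing here depends on the symbolic status in the table.

```python
import torch

x = torch.randn(4, 6)
values, indices = torch.topk(x, k=3, dim=1)   # three largest entries per row
sorted_vals, order = torch.sort(x, dim=1)     # ascending sort per row

a = torch.randn(5, 3)
u, s, v = torch.svd(a)                        # thin SVD of a dense matrix

grid = torch.linspace(0.0, 1.0, steps=11)     # 11 evenly spaced points in [0, 1]
```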

Torch level 3

Function ("yes" = symbolic implemented; usage sketch after the list)
lgamma
digamma
polygamma
erfinv
renorm
histc
bartlett_window
bincount
blackman_window
chain_matmul
cumprod
det
diagflat
diagonal
einsum
eye
hann_window
hamming_window
hinge_embedding_loss
ger
gesv
fft
ifft
rfft
irfft
inverse
kthvalue
logdet
matrix_rank
matrix_power
mode
mvlgamma
permute
pixel_shuffle
pinverse
slogdet
smm
sspaddmm
stft
flip
rot90
where
poisson
sparse_coo_tensor
sparse_resize_
sparse_resize_and_clear_
sparse_mask
to_dense
sparse_dim
dense_dim
coalesce
is_coalesced
indices
values
hspmm
copy_sparse_to_sparse_
to_sparse
meshgrid
cauchy_
log_normal_
exponential_
geometric_
diag
cross
triu
tril
trace
argsort
btriunpack
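Level 3 mixes indexing, spectral, and sparse helpers. A small sketch of three of the listed ops (einsum, where, flip); variable names are arbitrary.

```python
import torch

a = torch.randn(3, 4)
b = torch.randn(4, 5)

c = torch.einsum('ij,jk->ik', a, b)                    # matrix product written as an einsum
positive = torch.where(c > 0, c, torch.zeros_like(c))  # keep positives, zero out the rest
flipped = torch.flip(positive, dims=[1])               # reverse along the column dimension
```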

NN level 1

Function ("yes" = symbolic implemented; usage sketch after the list)
binary_cross_entropy
mse_loss
nll_loss
nll_loss2d
smooth_l1_loss
elu
glu
hardtanh
leaky_relu
log_sigmoid
softplus
softshrink
threshold
avg_pool2d
avg_pool3d
max_pool2d_with_indices
max_pool3d_with_indices
max_unpool2d
max_unpool3d
upsample_linear1d
upsample_bilinear2d
upsample_trilinear3d
upsample_nearest1d
upsample_nearest2d
upsample_nearest3d
dropout
feature_dropout
avg_pool1d
batch_norm
bilinear
binary_cross_entropy_with_logits
embedding
group_norm
instance_norm
linear
log_softmax
max_pool1d_with_indices
max_pool1d
max_pool2d
max_pool3d
relu yes
prelu
sigmoid yes
softmax
max_unpool1d
relu6
logsigmoid
tanhshrink
softsign
softmin
dropout2d
dropout3d
cross_entropy
interpolate
upsample
upsample_nearest
upsample_bilinear
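Most NN level-1 entries are the functional forms under torch.nn.functional. A short sketch chaining a few of them (relu is marked "yes" above; conv2d is listed under Torch level 1); the tensor shapes are made up for illustration.

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)          # a single 3-channel 8x8 input
w = torch.randn(6, 3, 3, 3)          # six 3x3 convolution filters

h = F.conv2d(x, w, padding=1)        # conv2d itself is listed under Torch level 1
h = F.relu(h)                        # relu is marked "yes" above
h = F.max_pool2d(h, kernel_size=2)
scores = F.log_softmax(h.flatten(1), dim=1)
```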

NN level 2

Function ("yes" = symbolic implemented; usage sketch after the list)
l1_loss
multi_margin_loss
multilabel_margin_loss
soft_margin_loss
ctc_loss
grid_sampler
grid_sampler_2d
grid_sampler_3d
layer_norm
lstm
gru
rnn_tanh
rnn_relu
lstm_cell
gru_cell
rnn_tanh_cell
rnn_relu_cell
_pack_padded_sequence
_pad_packed_sequence
torch.nn.utils.rnn.PackedSequence
torch.nn.utils.rnn.pack_padded_sequence
torch.nn.utils.rnn.pad_packed_sequence
torch.nn.utils.rnn.pad_sequence
torch.nn.utils.rnn.pack_sequence
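The packed-sequence utilities at the end of this list are normally used together with the recurrent ops above them. A minimal sketch of the usual pattern (pad, pack, run an LSTM, unpack); the batch of two sequences and the feature sizes are arbitrary, and lengths are given in descending order as the packing API expects.

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# Two variable-length sequences, already zero-padded to the same length.
padded = torch.randn(2, 5, 8)            # (batch, max_len, features)
lengths = torch.tensor([5, 3])           # true lengths, sorted descending

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)

packed = pack_padded_sequence(padded, lengths, batch_first=True)
packed_out, (h_n, c_n) = lstm(packed)
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
```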

NN level 3

Function ("yes" = symbolic implemented; usage sketch after the list)
unfold
adaptive_avg_pool2d
adaptive_avg_pool3d
adaptive_max_pool2d
adaptive_max_pool3d
fractional_max_pool2d
alpha_dropout
feature_alpha_dropout
adaptive_avg_pool1d
adaptive_max_pool1d
cosine_embedding_loss
embedding_bag
kl_div
margin_ranking_loss
pairwise_distance
pdist
rrelu
hardshrink
selu
celu
triplet_margin_loss
torch.nn.utils.clip_grad_norm_
torch.nn.utils.clip_grad_value_
torch.nn.utils.parameters_to_vector
torch.nn.utils.vector_to_parameters
torch.nn.utils.weight_norm
torch.nn.utils.remove_weight_norm
torch.nn.utils.spectral_norm
torch.nn.utils.remove_spectral_norm
fold
lp_pool1d
lp_pool2d
gumbel_softmax
local_response_norm
normalize
cosine_similarity
poisson_nll_loss
multilabel_soft_margin_loss
pad
grid_sample
affine_grid
torch.nn.parallel.data_parallel
calculate_gain
constant_
dirac_
xavier_uniform_
xavier_normal_
kaiming_uniform_
kaiming_normal_
orthogonal_
sparse_
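The initializers at the end of this list live under torch.nn.init and modify tensors in place; the gradient utilities live under torch.nn.utils. A small sketch combining both; the layer sizes and clipping threshold are arbitrary.

```python
import torch
import torch.nn as nn

layer = nn.Linear(32, 64)

# In-place weight initialization from the list above.
nn.init.xavier_uniform_(layer.weight)
nn.init.constant_(layer.bias, 0.0)

# Gradient clipping is applied after backward(), before the optimizer step.
x = torch.randn(4, 32)
loss = layer(x).sum()
loss.backward()
torch.nn.utils.clip_grad_norm_(layer.parameters(), max_norm=1.0)
```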