Soumith Chintala (soumith)
temp.py
### First, tokenize the input
import torch
tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased', do_basic_tokenize=False)
text_1 = "Who was Jim Henson ?"
text_2 = "Jim Henson was a puppeteer"
# Tokenized input
indexed_tokens = tokenizer.encode(text_1, text_2, add_special_tokens=True)
### Get the hidden states computed by `BertModel`
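The trailing comment names the next step but the gist preview cuts off there. A minimal sketch of that step, assuming the same torch.hub repo's 'model' entry point and that, as in pytorch-transformers, the first element of the model output is the hidden states:
# Sketch of the step the comment above refers to (uses the 'model' entry
# point of the same hub repo; not part of the original gist).
model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-cased')
model.eval()
tokens_tensor = torch.tensor([indexed_tokens])
with torch.no_grad():
    outputs = model(tokens_tensor)
hidden_states = outputs[0]  # shape: (batch, sequence_length, hidden_size)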
synthtext.py
import numpy as np
import os
import time
import warnings
import pickle
# from accimage import Image
from PIL import Image
import io
try:
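The preview is cut off at the try:, so its body is unknown. One common shape for such a guard in image-loading code is an optional fast backend with a fallback; this is purely hypothetical and is not the gist's actual code (which already imports PIL above):
# Hypothetical completion of the truncated try-block: guard an optional
# fast image backend and fall back to PIL if it is not installed.
try:
    from accimage import Image  # optional accelerated backend
except ImportError:
    from PIL import Image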
temp.cpp
#include <torch/torch.h>
#include <iostream>
#include <ATen/Parallel.h>
#include <ATen/ATen.h>
// using namespace at;
using namespace torch;
void submodular_select(Tensor candidate_points, Tensor features_done, Tensor features)
{
foo.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class simpnet_imgnet_drpall(nn.Module):
    """
    args: classes
          scale
          network_idx (0, 1): simpnet5m, simpnet8m
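A usage sketch based only on the documented arguments; the real constructor signature is not shown, so the keyword names and values here are assumptions:
# Hypothetical instantiation based on the docstring above; argument names
# and defaults are assumptions, not the gist's actual signature.
model = simpnet_imgnet_drpall(classes=1000, scale=1.0, network_idx=0)  # 0 -> simpnet5m, 1 -> simpnet8m
out = model(torch.randn(1, 3, 224, 224))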
densenet.py
op_version_set = 0
def forward(self,
            input_1: Tensor) -> Tensor:
    input_2 = torch._convolution(input_1, self.features.conv0.weight, None, [2, 2], [3, 3], [1, 1], False, [0, 0], 1, False, False, True)
    input_3 = torch.batch_norm(input_2, self.features.norm0.weight, self.features.norm0.bias, self.features.norm0.running_mean, self.features.norm0.running_var, False, 0., 1.0000000000000001e-05, True)
    input_4 = torch.threshold_(input_3, 0., 0.)
    input_5, _0 = torch.max_pool2d_with_indices(input_4, [3, 3], [2, 2], [1, 1], [1, 1], False)
    input_6 = torch.batch_norm(input_5, self.features.denseblock1.denselayer1.norm1.weight, self.features.denseblock1.denselayer1.norm1.bias, self.features.denseblock1.denselayer1.norm1.running_mean, self.features.denseblock1.denselayer1.norm1.running_var, False, 0., 1.0000000000000001e-05, True)
    input_7 = torch.threshold_(input_6, 0., 0.)
    input_8 = torch._convolution(input_7, self.features.denseblock1.denselayer1.conv1.weight, None, [1, 1], [0, 0], [1, 1], False, [0, 0],
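These are the low-level ops a traced DenseNet stem emits. For readability, here is a self-contained eager-mode sketch of the first four lines (input_2..input_5) with dummy parameters standing in for self.features.conv0 / norm0; the usual DenseNet stem shapes (7x7 conv, 64 channels) are an assumption:
# Eager-mode equivalent of input_2..input_5 above, with dummy parameters in
# place of the module's own weights (stem shapes assumed).
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 224, 224)
conv0_w = torch.randn(64, 3, 7, 7)
norm0_w, norm0_b = torch.ones(64), torch.zeros(64)
running_mean, running_var = torch.zeros(64), torch.ones(64)

x = F.conv2d(x, conv0_w, bias=None, stride=2, padding=3)            # torch._convolution(...)
x = F.batch_norm(x, running_mean, running_var, norm0_w, norm0_b,
                 training=False, eps=1e-5)                          # torch.batch_norm(...)
x = F.relu_(x)                                                      # threshold_(x, 0., 0.) is an in-place ReLU
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)             # max_pool2d_with_indices(...)[0]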
foo.cpp
std::vector<int64_t> input_size = {4, 3, 15, 17}; // B x C x H x W
std::vector<int64_t> kernel_size = {3, 5};
std::vector<int64_t> stride = {1, 2};
std::vector<int64_t> padding = {2, 1};
constexpr int out_channels = 5;
// make inputs
at::Tensor input = torch::randn(input_size);
at::Tensor weight = torch::randn({out_channels, input_size[1], kernel_size[0], kernel_size[1]});
at::Tensor bias = torch::randn({out_channels});
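The C++ snippet only builds the tensors; the call itself is not shown in the preview. A small Python sketch of the convolution those shapes imply (treating it as a plain conv2d is an assumption):
# Python sketch of the convolution the shapes above describe (the C++ gist
# is truncated before the call, so conv2d here is an assumption).
import torch
import torch.nn.functional as F

inp = torch.randn(4, 3, 15, 17)       # B x C x H x W
weight = torch.randn(5, 3, 3, 5)      # out_channels x C x kH x kW
bias = torch.randn(5)
out = F.conv2d(inp, weight, bias, stride=(1, 2), padding=(2, 1))
print(out.shape)  # torch.Size([4, 5, 17, 8]): H=(15+2*2-3)+1, W=(17+2*1-5)//2+1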
pytorch_api_categorization.md
Last active Nov 16, 2018, forked from ailzhang/pytorch_api_level.md

Torch level 1

| function | Symbolic_implemented |
| --- | --- |
| `gather` | |
| `equal` | |
| `__and__`, `__iand__`, `__or__`, `__ior__`, `__xor__`, `__ixor__`, `__lshift__`, `__ilshift__`, `__rshift__`, `__irshift__` | |
| `min`, `max` | |
| `all` | |
| `any` | |
| `frac` | yes |
gist:d111f54f2bc7a3f08eab0bd38db11803
diff --git a/torch/csrc/jit/autodiff.cpp b/torch/csrc/jit/autodiff.cpp
index 59eb7ca11..75abe0097 100644
--- a/torch/csrc/jit/autodiff.cpp
+++ b/torch/csrc/jit/autodiff.cpp
@@ -77,7 +77,8 @@ bool isDifferentiable(Node * n) {
"aten::trunc(Tensor self) -> Tensor",
"aten::log_softmax(Tensor self, int dim) -> Tensor",
"aten::avg_pool2d(Tensor self, int[] kernel_size, int[] stride, int[] padding, bool ceil_mode, bool count_include_pad) -> Tensor",
- "aten::max_pool2d_with_indices(Tensor self, int[] kernel_size, int[] stride, int[] padding, int[] dilation, bool ceil_mode) -> (Tensor, Tensor)"
+ "aten::max_pool2d_with_indices(Tensor self, int[] kernel_size, int[] stride, int[] padding, int[] dilation, bool ceil_mode) -> (Tensor, Tensor)",
micro.py
import time
import torch
import torchvision
batch_size = 128
num_iterations = 10
resnet50 = torchvision.models.resnet50().to(device="cuda")
inp = torch.randn(batch_size, 3, 224, 224, device="cuda")
target = torch.arange(batch_size, device="cuda")
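The preview stops after building the model and inputs. A hedged sketch of the timing loop such a microbenchmark usually runs; the loss, optimizer, and synchronization points are assumptions, not the gist's actual code:
# Sketch of the measurement loop the setup above suggests (assumed details).
criterion = torch.nn.CrossEntropyLoss().to(device="cuda")
optimizer = torch.optim.SGD(resnet50.parameters(), lr=0.01)

torch.cuda.synchronize()
start = time.time()
for _ in range(num_iterations):
    optimizer.zero_grad()
    loss = criterion(resnet50(inp), target)
    loss.backward()
    optimizer.step()
torch.cuda.synchronize()
print(f"{(time.time() - start) / num_iterations * 1000:.1f} ms/iter")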