Skip to content

Instantly share code, notes, and snippets.

View lanpa's full-sized avatar

Tzu-Wei Huang lanpa

View GitHub Profile
@lanpa
lanpa / gist:b5888c57ccfad66e3db2ce94bab53f62
Created June 5, 2019 17:00
result of jit.trace on torchvision's alexnet implementation
trace = torch.jit.trace(model_alexnet, args_alexnet)
graph = trace.graph
print(graph)
============================================
graph(%input.1 : Double(2, 3, 224, 224),
%196 : Tensor,
%197 : Tensor,
%198 : Tensor,
%199 : Tensor,
%200 : Tensor,
@lanpa
lanpa / gist:4069f3eb57d5a140207e45b9a3fc153c
Created October 14, 2018 06:56
example output of fix-graph
graph(%0 : Float(1, 3)
%1 : Float(5, 3)
%2 : Float(5)) {
%3 : Dynamic = onnx::Constant[value={0}]()
%4 : Dynamic = onnx::Gemm[alpha=1, beta=0, transB=1](%0, %1, %3)
%5 : Float(1, 5) = onnx::Add(%2, %4)
return (%5);
}
%3 : Dynamic = onnx::Constant[value={0}]()
running install
running bdist_egg
running egg_info
writing tensorboardX.egg-info/PKG-INFO
writing dependency_links to tensorboardX.egg-info/dependency_links.txt
writing requirements to tensorboardX.egg-info/requires.txt
writing top-level names to tensorboardX.egg-info/top_level.txt
reading manifest file 'tensorboardX.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
writing manifest file 'tensorboardX.egg-info/SOURCES.txt'
running install
Done generating tensorboardX/proto/*pb2*.py
running build
running build_py
copying tensorboardX/proto/types_pb2.py -> build/lib/tensorboardX/proto
copying tensorboardX/proto/resource_handle_pb2.py -> build/lib/tensorboardX/proto
copying tensorboardX/proto/event_pb2.py -> build/lib/tensorboardX/proto
copying tensorboardX/proto/summary_pb2.py -> build/lib/tensorboardX/proto
copying tensorboardX/proto/graph_pb2.py -> build/lib/tensorboardX/proto
copying tensorboardX/proto/layout_pb2.py -> build/lib/tensorboardX/proto
--------------------------- --------------- --------------- --------------- --------------- ---------------
Name CPU time CUDA time Calls CPU total CUDA total
--------------------------- --------------- --------------- --------------- --------------- ---------------
conv2d 462.645us 0.000us 1 462.645us 0.000us
convolution 461.316us 0.000us 1 461.316us 0.000us
_convolution 459.809us 0.000us 1 459.809us 0.000us
tensor 2.967us 0.000us 1 2.967us 0.000us
_convolution_nogroup 445.746us 0.000us 1 445.746us 0.000us
thnn_conv2d 441.713us 0.000us 1 441.713us 0.000us
import torch.nn as nn
import torch
class SimpleModel(nn.Module):
    """A minimal module: flatten the input to 6 features, then one linear layer 6 -> 9."""

    def __init__(self):
        super().__init__()
        # Single fully connected layer mapping 6 input features to 9 outputs.
        self.fc = nn.Linear(in_features=6, out_features=9)

    def forward(self, x):
        # Collapse whatever shape arrives into rows of 6 features before the linear map.
        flat = x.view(-1, 6)
        return self.fc(flat)
@lanpa
lanpa / gist:53ff053d1c76129fbdd54655efeee548
Last active February 15, 2018 03:45
pytorch 0.3.1 trace
class Net1(nn.Module):
    """Small MNIST-style CNN: two conv layers, dropout, two linear layers, one batch norm.

    Only layer construction is shown in this gist excerpt; layers are created in the
    same order as the original so random weight initialization is unchanged.
    """

    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(in_features=320, out_features=50)
        self.fc2 = nn.Linear(in_features=50, out_features=10)
        self.bn = nn.BatchNorm2d(num_features=20)
@lanpa
lanpa / gist:debcb8b8d5e674f176817b929f1747ea
Last active February 15, 2018 03:45
pytorch 0.4.0a0+fe810ed trace
class Net1(nn.Module):
    """Small MNIST-style CNN: two conv layers, dropout, two linear layers, one batch norm.

    Only layer construction is shown in this gist excerpt; layers are created in the
    same order as the original so random weight initialization is unchanged.
    """

    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(in_features=320, out_features=50)
        self.fc2 = nn.Linear(in_features=50, out_features=10)
        self.bn = nn.BatchNorm2d(num_features=20)
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from tensorboardX import SummaryWriter
# Training settings
# Filter rows of data.csv, keeping rows whose first field is "1", "2", or "3".
# Each kept row is printed and collected into `valid` as an (int, int) pair.
#
# Fixes over the original:
# - stream the file line by line instead of loading it all with readlines()
# - membership test `a in {...}` instead of a chained `a=='1' or a=='2' or ...`
# - explicit text encoding so behavior does not depend on the platform default
valid = []
with open('data.csv', encoding='utf-8') as f:
    for line in f:
        # Each row is "a,b"; strip only the trailing newline so fields keep any spaces.
        a, b = line.strip('\n').split(',')
        if a in {'1', '2', '3'}:
            print(a, b)
            valid.append((int(a), int(b)))