dhruvbird / ViewLiteInterpreterModelByteCode.py
Created November 12, 2021 19:40
View the bytecode of a PyTorch Lite Interpreter Model
import zipfile

from torch.utils import show_pickle

with zipfile.ZipFile('AddTensorsModelOptimized.ptl', 'r') as myzip:
    with myzip.open("AddTensorsModelOptimized/bytecode.pkl") as bytecode_pkl:
        # Dump while the archive is still open; the member stream is no
        # longer readable once the ZipFile closes.
        show_pickle.DumpUnpickler.dump(bytecode_pkl, None)

# Output (truncated):
# (5,
#  ('__torch__.___torch_mangle_12.AddtensorsModel.forward',
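DumpUnpickler.dump takes an output stream as its second argument, so the full dump can be captured to a file as well. A minimal sketch, assuming the archive layout from the snippet above (the bytecode_dump.txt name is just illustrative):

import zipfile

from torch.utils import show_pickle

# Write the complete bytecode dump to a file instead of stdout.
with zipfile.ZipFile("AddTensorsModelOptimized.ptl") as myzip:
    with myzip.open("AddTensorsModelOptimized/bytecode.pkl") as pkl:
        with open("bytecode_dump.txt", "w") as out:
            show_pickle.DumpUnpickler.dump(pkl, out)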
dhruvbird / OptimizedModelForwardGraph.py
Created November 12, 2021 19:39
Result of running optimized_model.forward.graph
graph(%self : __torch__.___torch_mangle_47.AddtensorsModel,
      %x.1 : Tensor,
      %y.1 : Tensor):
  %3 : int = prim::Constant[value=1]()
  %self.t1 : Tensor = prim::Constant[value= 0 5 10 [ CPUFloatType{3} ]]()
  %5 : Tensor = aten::add(%self.t1, %x.1, %3) # <ipython-input-98-b27af2abc3c5>:8:8
  %z.5 : Tensor = aten::add(%5, %y.1, %3) # <ipython-input-98-b27af2abc3c5>:8:8
  return (%z.5)
dhruvbird / PyTorchModelHelperGraph.py
Created November 12, 2021 19:38
Result of running scripted.helper.graph
graph(%self : __torch__.___torch_mangle_41.AddtensorsModel,
      %x.1 : Tensor,
      %y.1 : Tensor):
  %6 : int = prim::Constant[value=1]()
  %z.1 : Tensor = prim::GetAttr[name="t1"](%self)
  %7 : Tensor = aten::add(%z.1, %x.1, %6) # <ipython-input-98-b27af2abc3c5>:8:8
  %z.5 : Tensor = aten::add(%7, %y.1, %6) # <ipython-input-98-b27af2abc3c5>:8:8
  return (%z.5)
dhruvbird / PyTorchModelForwardGraph.py
Created November 12, 2021 19:37
Result of running scripted.forward.graph
graph(%self : __torch__.___torch_mangle_41.AddtensorsModel,
      %x.1 : Tensor,
      %y.1 : Tensor):
  %5 : Tensor = prim::CallMethod[name="helper"](%self, %x.1, %y.1) # <ipython-input-98-b27af2abc3c5>:12:11
  return (%5)
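Taken together, the three dumps above show the same model before and after mobile optimization: in the scripted module, forward dispatches to helper via prim::CallMethod and helper fetches t1 with prim::GetAttr; after optimize_for_mobile, the call is inlined and t1 is frozen into the graph as a prim::Constant. A minimal sketch of how such dumps are produced, assuming the AddtensorsModel class from AddTensorsModel.py at the bottom of this page:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

m = AddtensorsModel()  # defined in AddTensorsModel.py below
scripted = torch.jit.script(m)
optimized_model = optimize_for_mobile(scripted)

print(scripted.forward.graph)         # forward calls helper via prim::CallMethod
print(scripted.helper.graph)          # helper reads t1 via prim::GetAttr
print(optimized_model.forward.graph)  # helper inlined, t1 frozen to a constant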
dhruvbird / RunPyTorchLiteInterpreterModel.cpp
Created November 12, 2021 19:29
Run the lite interpreter model using the PyTorch C++ API
#include <iostream>
#include <vector>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>

int main() {
  // Load the model saved for the lite interpreter.
  auto model = torch::jit::_load_for_mobile("AddTensorsModelOptimized.ptl");
  // forward(x, y) takes two float tensors of shape {3}.
  std::vector<at::IValue> inputs;
  inputs.push_back(at::zeros({3}));
  inputs.push_back(at::zeros({3}));
  auto ret = model.forward(inputs);
  std::cout << ret.toTensor() << std::endl;
  return 0;
}
dhruvbird / SaveModelForMobileInference.py
Created November 12, 2021 19:28
Save the model for mobile inference using the lite-interpreter format
optimized_model._save_for_lite_interpreter("AddTensorsModelOptimized.ptl")
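The saved file can also be loaded back in Python for a quick sanity check before moving to C++. A sketch, assuming torch.jit.mobile's _load_for_lite_interpreter (the Python counterpart of the C++ loader above):

import torch
from torch.jit.mobile import _load_for_lite_interpreter

loaded = _load_for_lite_interpreter("AddTensorsModelOptimized.ptl")
print(loaded(torch.Tensor([1, 2, 3]), torch.Tensor([3, 4, 5])))
# t1 + x + y = [0, 5, 10] + [1, 2, 3] + [3, 4, 5] -> tensor([ 4., 11., 18.])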
dhruvbird / OptimizeModelForMobileInference.py
Created November 12, 2021 19:27
Optimize scripted model for mobile inference
from torch.utils.mobile_optimizer import optimize_for_mobile
optimized_model = optimize_for_mobile(scripted)
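Note that optimize_for_mobile keeps only forward by default; other methods such as helper are dropped from the optimized module unless listed explicitly. A one-line sketch, assuming the preserved_methods parameter of torch.utils.mobile_optimizer:

optimized_model = optimize_for_mobile(scripted, preserved_methods=["helper"])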
dhruvbird / ScriptPyTorchModel.py
Created November 12, 2021 19:26
Create a scripted model
scripted = torch.jit.script(m)
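To inspect what scripting produced, the recovered TorchScript source of each compiled method can be printed via the .code property (a quick sketch against the snippet above):

print(scripted.code)         # TorchScript source for forward
print(scripted.helper.code)  # helper is compiled too, since forward calls it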
dhruvbird / RunModel.py
Created November 12, 2021 19:26
Run the model
m = AddtensorsModel()
res = m(torch.Tensor([1, 2, 3]), torch.Tensor([3, 4, 5]))
print(res)
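Worked element-wise against the snippet above, the result is t1 + x + y:

# [0, 5, 10] + [1, 2, 3] + [3, 4, 5] = [4, 11, 18]
assert torch.equal(res, torch.Tensor([4, 11, 18]))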
dhruvbird / AddTensorsModel.py
Last active November 12, 2021 19:25
Python code to create a very simple model
import torch
class AddtensorsModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.t1 = torch.Tensor([0, 5, 10])

    def helper(self, x, y):
        z = self.t1
        z = z + x + y
        return z

    # forward delegates to helper, hence the single prim::CallMethod in its graph.
    def forward(self, x, y):
        return self.helper(x, y)