@Wheest, last active March 15, 2023
Debug script for TVM int8 quantization
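Usage: run the script directly with Python 3 (assuming PyTorch and a TVM build with Relay quantization support are installed). It builds a cut-down DenseNet, converts it to Relay, quantizes it to int8, and times one inference.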
#!/usr/bin/env python3
import os
import time
from collections import OrderedDict
from typing import Any, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp  # needed by _DenseLayer.call_checkpoint_bottleneck
from torch import Tensor

import tvm
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.transform import InferType, ToMixedPrecision  # used in model_opt

np.random.seed(42)

TEST_DATASETS = ["cifar10", "imagenet", "test"]
class _DenseLayer(nn.Module):
    def __init__(
        self,
        num_input_features: int,
        growth_rate: int,
        bn_size: int,
        drop_rate: float,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        self.norm1 = nn.BatchNorm2d(num_input_features)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(
            num_input_features,
            bn_size * growth_rate,
            kernel_size=1,
            stride=1,
            bias=False,
        )
        self.norm2 = nn.BatchNorm2d(bn_size * growth_rate)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            bn_size * growth_rate,
            growth_rate,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(
            self.relu1(self.norm1(concated_features))
        )  # noqa: T484
        return bottleneck_output

    # todo: rewrite when torchscript supports any
    def any_requires_grad(self, input: List[Tensor]) -> bool:
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False

    @torch.jit.unused  # noqa: T484
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        def closure(*inputs):
            return self.bn_function(inputs)

        return cp.checkpoint(closure, *input)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: List[Tensor]) -> Tensor:  # noqa: F811
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        pass

    # torchscript does not yet support *args, so we overload method
    # allowing it to take either a List[Tensor] or single Tensor
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input

        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                raise Exception("Memory Efficient not supported in JIT")
            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)

        # new_features = self.conv2(self.relu2(self.norm2(prev_features)))
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(
                new_features, p=self.drop_rate, training=self.training
            )
        return new_features
class _Transition(nn.Sequential):
    def __init__(self, num_input_features: int, num_output_features: int) -> None:
        super().__init__()
        self.norm = nn.BatchNorm2d(num_input_features)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            num_input_features, num_output_features, kernel_size=1, stride=1, bias=False
        )
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
class _DenseBlock(nn.ModuleDict):
    _version = 2

    def __init__(
        self,
        num_layers: int,
        num_input_features: int,
        bn_size: int,
        growth_rate: int,
        drop_rate: float,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module("denselayer%d" % (i + 1), layer)

    def forward(self, init_features: Tensor) -> Tensor:
        features = [init_features]
        for name, layer in self.items():
            new_features = layer(features)
            features.append(new_features)
        # debug change: return only the last layer's features rather than
        # the usual DenseNet concatenation
        return features[-1]
        # return torch.cat(features, 1)
class WeeDenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottleneck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    def __init__(
        self,
        growth_rate: int = 32,
        block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
        num_init_features: int = 64,
        bn_size: int = 4,
        drop_rate: float = 0,
        num_classes: int = 1000,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        # _log_api_usage_once(self)

        # First convolution
        self.features = nn.Sequential(
            OrderedDict(
                [
                    (
                        "conv0",
                        nn.Conv2d(
                            3,
                            num_init_features,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False,
                        ),
                    ),
                    ("norm0", nn.BatchNorm2d(num_init_features)),
                    ("relu0", nn.ReLU(inplace=True)),
                    ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )

        # Each denseblock (debug: only the first block is added, with
        # num_layers pinned to 2, because of the `break` below)
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=2,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            # num_features = num_features + num_layers * growth_rate
            # if i != len(block_config) - 1:
            #     trans = _Transition(
            #         num_input_features=num_features,
            #         num_output_features=num_features // 2,
            #     )
            #     self.features.add_module("transition%d" % (i + 1), trans)
            #     num_features = num_features // 2
            break

        # # Final batch norm
        # self.features.add_module("norm5", nn.BatchNorm2d(num_features))
        # # Linear layer
        # self.classifier = nn.Linear(num_features, num_classes)

        # # Official init from torch repo.
        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         nn.init.kaiming_normal_(m.weight)
        #     elif isinstance(m, nn.BatchNorm2d):
        #         nn.init.constant_(m.weight, 1)
        #         nn.init.constant_(m.bias, 0)
        #     elif isinstance(m, nn.Linear):
        #         nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        out = self.features(x)
        # out = F.relu(out, inplace=True)
        # out = F.adaptive_avg_pool2d(out, (1, 1))
        # out = torch.flatten(out, 1)
        # out = self.classifier(out)
        return out
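# Quick sanity check for the cut-down network (a sketch, not part of the
# original script; with the debug _DenseBlock the output has growth_rate
# channels, and the stem halves the 224x224 input twice):
# net = WeeDenseNet().eval()
# print(net(torch.randn(1, 3, 224, 224)).shape)  # expect (1, 32, 56, 56)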
def quantize(mod, params):
    with relay.quantize.qconfig(calibrate_mode="global_scale", global_scale=8.0):
        mod = relay.quantize.quantize(mod, params)
    return mod
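# Relay's quantizer also supports data-aware calibration as an alternative to
# the fixed global scale used above. A minimal sketch (commented out; assumes
# a hypothetical calibrate_dataset() generator yielding {input_name: batch}
# dicts, which this script does not define):
# def quantize_kl(mod, params, calibrate_dataset):
#     with relay.quantize.qconfig(calibrate_mode="kl_divergence", weight_scale="max"):
#         mod = relay.quantize.quantize(mod, params, dataset=calibrate_dataset())
#     return mod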
def run_inference_vm(mod, dev, target, in_shape):
    # VM-executor variant (renamed: it was previously shadowed by the
    # graph-executor run_inference defined further down)
    model = relay.create_executor("vm", mod, dev, target).evaluate()
    data = np.random.uniform(5, 10, in_shape).astype(np.float32)
    prediction = model(data)
def model_opt(mod, params, run_fp16_pass=False, run_other_opts=True, fast_math=False):
    # code adapted from https://github.com/AndrewZhaoLuo/TVM-Sandbox/blob/f1f9f698be2b7a8cc5bcf1167d892cd915eb7ce7/fp16_pass/benchmark_fp16.py#L19
    mod = tvm.IRModule.from_expr(mod["main"])
    remove_bn_pass = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.SimplifyInference(),
            relay.transform.FoldConstant(),
            relay.transform.FoldScaleAxis(),
        ]
    )
    mod = remove_bn_pass(mod)

    if run_other_opts:
        mod = tvm.relay.transform.FastMath()(mod) if fast_math else mod
        mod = tvm.relay.transform.EliminateCommonSubexpr()(mod)
        BindPass = tvm.relay.transform.function_pass(
            lambda fn, new_mod, ctx: tvm.relay.build_module.bind_params_by_name(
                fn, params
            ),
            opt_level=1,
        )
        mod = BindPass(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.CombineParallelBatchMatmul()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)

    if run_fp16_pass:
        mod = InferType()(mod)
        mod = ToMixedPrecision()(mod)

    if run_other_opts and run_fp16_pass:
        # run one more pass to clean up new subgraph
        mod = tvm.relay.transform.EliminateCommonSubexpr()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.CombineParallelBatchMatmul()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.FastMath()(mod) if fast_math else mod

    return mod, params
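# Example invocation with the optional passes enabled (a sketch; main() below
# calls model_opt(mod, params) with the defaults):
# mod, params = model_opt(mod, params, run_fp16_pass=True, fast_math=True)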
@tvm.tir.transform.prim_func_pass(opt_level=0)
def print_tir(f, mod, ctx):
    """Debug pass: print each PrimFunc as it is lowered."""
    print(f)
    return f
def run_inference(mod, dev, target, in_shape):
    # graph-executor variant; this is what main() calls on the quantized module
    model = relay.create_executor("graph", mod, dev, target).evaluate()
    data = np.random.uniform(5, 10, in_shape).astype(np.float32)
    prediction = model(data)
def run_inference_fp32(mod, params, input_name, dev, target, in_shape):
    with tvm.transform.PassContext(
        opt_level=3, config={"tir.add_lower_pass": [(3, print_tir)]}
    ):
        lib = tvm.relay.build(mod, target=target, params=params)
    model = graph_executor.GraphModule(lib["default"](dev))
    data = np.random.uniform(5, 10, in_shape).astype(np.float32)
    model.set_input(input_name, data)
    model.run()
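# For more stable timings than the single run() above, TVM's time_evaluator
# can be used on the underlying runtime module (a sketch, not in the original):
# ftimer = model.module.time_evaluator("run", dev, number=10, repeat=3)
# print("mean inference time (s):", np.mean(ftimer().results))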
def main():
    device = "x86_cpu"
    if device == "x86_cpu":
        target = "llvm -mtriple=x86_64-linux-gnu -mcpu=core-avx2"
        dev = tvm.device(target)
    elif device == "arm_cpu":
        dev = tvm.cpu(0)
        target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
    elif device == "arm_cuda":
        target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
        target = tvm.target.Target("cuda", host=target)
        dev = tvm.cuda(0)
    else:
        raise ValueError(f"Unknown device: {device}")

    model = torch.hub.load(
        "pytorch/vision:v0.11.0", "densenet161", pretrained=False
    ).eval()
    # debug: the hub model above is immediately replaced by the cut-down net
    model = WeeDenseNet().eval()
    # model = torch.hub.load(
    #     "pytorch/vision:v0.11.0", "resnet50", pretrained=False
    # ).eval()
    # model = model_dict["densenet161-imagenet"]()

    in_shape = [1, 3, 224, 224]
    input_name = "input0"
    input_data = torch.randn(in_shape)
    scripted_model = torch.jit.trace(model, input_data).eval()
    shape_list = [(input_name, in_shape)]
    mod, params = tvm.relay.frontend.from_pytorch(scripted_model, shape_list)

    # exit(1)
    # start = time.time()
    # run_inference_fp32(mod, params, input_name, dev, target, in_shape)
    # # run_tests(mod2, dev, target, test_data)
    # end = time.time()
    # print("fp32:", end - start)

    print("loaded model")
    mod, params = model_opt(mod, params)
    print(mod)
    mod2 = quantize(mod, params)
    print("quantized")
    start = time.time()
    print(mod2)
    run_inference(mod2, dev, target, in_shape)
    end = time.time()
    print(end - start)


if __name__ == "__main__":
    main()