Skip to content

Instantly share code, notes, and snippets.

@qedawkins
Created August 18, 2022 17:05
Show Gist options
  • Save qedawkins/ff04b8c5e41f91f58761f9985865df4d to your computer and use it in GitHub Desktop.

Python Script

Script can be found at examples/onnx_resnet18.py

import sys 

from PIL import Image
import requests

import torch
import torchvision.models as models
from torchvision import transforms

import torch_mlir

def load_and_preprocess_image(url: str) -> torch.Tensor:
    """Download an image and preprocess it into a ResNet-ready batch tensor.

    Args:
        url: HTTP(S) URL of the image to fetch.

    Returns:
        A float tensor of shape (1, 3, 224, 224): the image resized,
        center-cropped, converted to a tensor, normalized with the standard
        ImageNet mean/std, and given a leading batch dimension.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    headers = {
        # Some hosts (e.g. Wikimedia) reject requests without a browser-like UA.
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    }
    # timeout prevents an unresponsive server from hanging the script forever;
    # raise_for_status prevents feeding an HTML error page to PIL.
    response = requests.get(url, headers=headers, stream=True, timeout=30)
    response.raise_for_status()
    img = Image.open(response.raw).convert("RGB")
    # Standard ImageNet preprocessing pipeline.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    img_preprocessed = preprocess(img)
    # Add the batch dimension: (3, 224, 224) -> (1, 3, 224, 224).
    return torch.unsqueeze(img_preprocessed, 0)


def load_labels() -> list:
    """Download the ImageNet class-label list used to decode model outputs.

    Returns:
        A list of class-name strings, one per line of the fetched file,
        indexed by class id.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    # timeout prevents a hang on an unresponsive server; raise_for_status
    # surfaces download failures instead of silently parsing an error page.
    response = requests.get(
        "https://raw.githubusercontent.com/cathyzhyi/ml-data/main/imagenet-classes.txt",
        stream=True,
        timeout=30,
    )
    response.raise_for_status()
    return [line.strip() for line in response.text.splitlines()]

image_url = "https://upload.wikimedia.org/wikipedia/commons/2/26/YellowLabradorLooking_new.jpg"

print("load image from " + image_url, file=sys.stderr)
img = load_and_preprocess_image(image_url)
labels = load_labels()

# Pretrained ResNet-18 in inference mode (train(False) is equivalent to eval()).
resnet18 = models.resnet18(pretrained=True)
resnet18.train(False)

# Compile the model against a dummy input of the expected shape, lowering to
# the ONNX output type.
module = torch_mlir.compile(resnet18, torch.ones(1, 3, 224, 224), output_type="onnx")

import subprocess
import tempfile
import warnings

# Write the module to a temp file so torch-mlir-opt can elide the large
# weight constants before printing, keeping the IR dump readable.
temp_module = tempfile.NamedTemporaryFile(
        mode="wt", suffix="_to_onnx.mlir", prefix="tmp_torch_"
)
temp_module.write(str(module.operation.get_asm()))
# Flush buffered writes so torch-mlir-opt (a separate process reading
# temp_module.name) sees the complete file rather than a truncated one.
temp_module.flush()

command = ['torch-mlir-opt']
command += [temp_module.name, '--mlir-elide-elementsattrs-if-larger=32']

try:
    subprocess.run(command)
except FileNotFoundError:
    # Best effort: if the tool isn't installed, dump the raw (unelided) module.
    module.dump()
    warnings.warn("Couldn't find 'torch-mlir-opt' in the PATH so the module was dumped. Please add it to the path to elide large constants.")

IR Dump

// ResNet-18 lowered to the ONNX dialect by torch_mlir.compile; large weight
// constants have been elided (dense_resource<__elided__>) via
// --mlir-elide-elementsattrs-if-larger=32. Intermediate result types are
// unranked (tensor<*xf32>) — shapes have not been inferred at this stage.
module {
  func.func @main_graph(%arg0: tensor<1x3x224x224xf32>) -> tensor<1x1000xf32> attributes {input_names = ["x.1"], output_names = ["x.20"]} {
    %0 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64x3x7x7xf32>} : () -> tensor<64x3x7x7xf32>
    %1 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64xf32>} : () -> tensor<64xf32>
    %2 = "onnx.Conv"(%arg0, %0, %1) {dilations = [1, 1], group = 1 : si64, kernel_shape = [7, 7], onnx_node_name = "Conv_0", pads = [3, 3, 3, 3], strides = [2, 2]} : (tensor<1x3x224x224xf32>, tensor<64x3x7x7xf32>, tensor<64xf32>) -> tensor<*xf32>
    %3 = "onnx.Relu"(%2) {onnx_node_name = "Relu_1"} : (tensor<*xf32>) -> tensor<*xf32>
    %4 = "onnx.MaxPoolSingleOut"(%3) {ceil_mode = 0 : si64, kernel_shape = [3, 3], onnx_node_name = "MaxPool_2", pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<*xf32>) -> tensor<*xf32>
    %5 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64x64x3x3xf32>} : () -> tensor<64x64x3x3xf32>
    %6 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64xf32>} : () -> tensor<64xf32>
    %7 = "onnx.Conv"(%4, %5, %6) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_3", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<64x64x3x3xf32>, tensor<64xf32>) -> tensor<*xf32>
    %8 = "onnx.Relu"(%7) {onnx_node_name = "Relu_4"} : (tensor<*xf32>) -> tensor<*xf32>
    %9 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64x64x3x3xf32>} : () -> tensor<64x64x3x3xf32>
    %10 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64xf32>} : () -> tensor<64xf32>
    %11 = "onnx.Conv"(%8, %9, %10) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_5", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<64x64x3x3xf32>, tensor<64xf32>) -> tensor<*xf32>
    %12 = "onnx.Add"(%11, %4) {onnx_node_name = "Add_6"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %13 = "onnx.Relu"(%12) {onnx_node_name = "Relu_7"} : (tensor<*xf32>) -> tensor<*xf32>
    %14 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64x64x3x3xf32>} : () -> tensor<64x64x3x3xf32>
    %15 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64xf32>} : () -> tensor<64xf32>
    %16 = "onnx.Conv"(%13, %14, %15) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_8", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<64x64x3x3xf32>, tensor<64xf32>) -> tensor<*xf32>
    %17 = "onnx.Relu"(%16) {onnx_node_name = "Relu_9"} : (tensor<*xf32>) -> tensor<*xf32>
    %18 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64x64x3x3xf32>} : () -> tensor<64x64x3x3xf32>
    %19 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<64xf32>} : () -> tensor<64xf32>
    %20 = "onnx.Conv"(%17, %18, %19) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_10", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<64x64x3x3xf32>, tensor<64xf32>) -> tensor<*xf32>
    %21 = "onnx.Add"(%20, %13) {onnx_node_name = "Add_11"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %22 = "onnx.Relu"(%21) {onnx_node_name = "Relu_12"} : (tensor<*xf32>) -> tensor<*xf32>
    %23 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128x64x3x3xf32>} : () -> tensor<128x64x3x3xf32>
    %24 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128xf32>} : () -> tensor<128xf32>
    %25 = "onnx.Conv"(%22, %23, %24) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_13", pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<*xf32>, tensor<128x64x3x3xf32>, tensor<128xf32>) -> tensor<*xf32>
    %26 = "onnx.Relu"(%25) {onnx_node_name = "Relu_14"} : (tensor<*xf32>) -> tensor<*xf32>
    %27 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128x64x1x1xf32>} : () -> tensor<128x64x1x1xf32>
    %28 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128xf32>} : () -> tensor<128xf32>
    %29 = "onnx.Conv"(%22, %27, %28) {dilations = [1, 1], group = 1 : si64, kernel_shape = [1, 1], onnx_node_name = "Conv_15", pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<128x64x1x1xf32>, tensor<128xf32>) -> tensor<*xf32>
    %30 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128x128x3x3xf32>} : () -> tensor<128x128x3x3xf32>
    %31 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128xf32>} : () -> tensor<128xf32>
    %32 = "onnx.Conv"(%26, %30, %31) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_16", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<128x128x3x3xf32>, tensor<128xf32>) -> tensor<*xf32>
    %33 = "onnx.Add"(%32, %29) {onnx_node_name = "Add_17"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %34 = "onnx.Relu"(%33) {onnx_node_name = "Relu_18"} : (tensor<*xf32>) -> tensor<*xf32>
    %35 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128x128x3x3xf32>} : () -> tensor<128x128x3x3xf32>
    %36 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128xf32>} : () -> tensor<128xf32>
    %37 = "onnx.Conv"(%34, %35, %36) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_19", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<128x128x3x3xf32>, tensor<128xf32>) -> tensor<*xf32>
    %38 = "onnx.Relu"(%37) {onnx_node_name = "Relu_20"} : (tensor<*xf32>) -> tensor<*xf32>
    %39 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128x128x3x3xf32>} : () -> tensor<128x128x3x3xf32>
    %40 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<128xf32>} : () -> tensor<128xf32>
    %41 = "onnx.Conv"(%38, %39, %40) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_21", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<128x128x3x3xf32>, tensor<128xf32>) -> tensor<*xf32>
    %42 = "onnx.Add"(%41, %34) {onnx_node_name = "Add_22"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %43 = "onnx.Relu"(%42) {onnx_node_name = "Relu_23"} : (tensor<*xf32>) -> tensor<*xf32>
    %44 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256x128x3x3xf32>} : () -> tensor<256x128x3x3xf32>
    %45 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256xf32>} : () -> tensor<256xf32>
    %46 = "onnx.Conv"(%43, %44, %45) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_24", pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<*xf32>, tensor<256x128x3x3xf32>, tensor<256xf32>) -> tensor<*xf32>
    %47 = "onnx.Relu"(%46) {onnx_node_name = "Relu_25"} : (tensor<*xf32>) -> tensor<*xf32>
    %48 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256x128x1x1xf32>} : () -> tensor<256x128x1x1xf32>
    %49 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256xf32>} : () -> tensor<256xf32>
    %50 = "onnx.Conv"(%43, %48, %49) {dilations = [1, 1], group = 1 : si64, kernel_shape = [1, 1], onnx_node_name = "Conv_26", pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<256x128x1x1xf32>, tensor<256xf32>) -> tensor<*xf32>
    %51 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256x256x3x3xf32>} : () -> tensor<256x256x3x3xf32>
    %52 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256xf32>} : () -> tensor<256xf32>
    %53 = "onnx.Conv"(%47, %51, %52) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_27", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<256x256x3x3xf32>, tensor<256xf32>) -> tensor<*xf32>
    %54 = "onnx.Add"(%53, %50) {onnx_node_name = "Add_28"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %55 = "onnx.Relu"(%54) {onnx_node_name = "Relu_29"} : (tensor<*xf32>) -> tensor<*xf32>
    %56 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256x256x3x3xf32>} : () -> tensor<256x256x3x3xf32>
    %57 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256xf32>} : () -> tensor<256xf32>
    %58 = "onnx.Conv"(%55, %56, %57) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_30", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<256x256x3x3xf32>, tensor<256xf32>) -> tensor<*xf32>
    %59 = "onnx.Relu"(%58) {onnx_node_name = "Relu_31"} : (tensor<*xf32>) -> tensor<*xf32>
    %60 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256x256x3x3xf32>} : () -> tensor<256x256x3x3xf32>
    %61 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<256xf32>} : () -> tensor<256xf32>
    %62 = "onnx.Conv"(%59, %60, %61) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_32", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<256x256x3x3xf32>, tensor<256xf32>) -> tensor<*xf32>
    %63 = "onnx.Add"(%62, %55) {onnx_node_name = "Add_33"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %64 = "onnx.Relu"(%63) {onnx_node_name = "Relu_34"} : (tensor<*xf32>) -> tensor<*xf32>
    %65 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512x256x3x3xf32>} : () -> tensor<512x256x3x3xf32>
    %66 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512xf32>} : () -> tensor<512xf32>
    %67 = "onnx.Conv"(%64, %65, %66) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_35", pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<*xf32>, tensor<512x256x3x3xf32>, tensor<512xf32>) -> tensor<*xf32>
    %68 = "onnx.Relu"(%67) {onnx_node_name = "Relu_36"} : (tensor<*xf32>) -> tensor<*xf32>
    %69 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512x256x1x1xf32>} : () -> tensor<512x256x1x1xf32>
    %70 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512xf32>} : () -> tensor<512xf32>
    %71 = "onnx.Conv"(%64, %69, %70) {dilations = [1, 1], group = 1 : si64, kernel_shape = [1, 1], onnx_node_name = "Conv_37", pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<512x256x1x1xf32>, tensor<512xf32>) -> tensor<*xf32>
    %72 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512x512x3x3xf32>} : () -> tensor<512x512x3x3xf32>
    %73 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512xf32>} : () -> tensor<512xf32>
    %74 = "onnx.Conv"(%68, %72, %73) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_38", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<512x512x3x3xf32>, tensor<512xf32>) -> tensor<*xf32>
    %75 = "onnx.Add"(%74, %71) {onnx_node_name = "Add_39"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %76 = "onnx.Relu"(%75) {onnx_node_name = "Relu_40"} : (tensor<*xf32>) -> tensor<*xf32>
    %77 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512x512x3x3xf32>} : () -> tensor<512x512x3x3xf32>
    %78 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512xf32>} : () -> tensor<512xf32>
    %79 = "onnx.Conv"(%76, %77, %78) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_41", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<512x512x3x3xf32>, tensor<512xf32>) -> tensor<*xf32>
    %80 = "onnx.Relu"(%79) {onnx_node_name = "Relu_42"} : (tensor<*xf32>) -> tensor<*xf32>
    %81 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512x512x3x3xf32>} : () -> tensor<512x512x3x3xf32>
    %82 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<512xf32>} : () -> tensor<512xf32>
    %83 = "onnx.Conv"(%80, %81, %82) {dilations = [1, 1], group = 1 : si64, kernel_shape = [3, 3], onnx_node_name = "Conv_43", pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<*xf32>, tensor<512x512x3x3xf32>, tensor<512xf32>) -> tensor<*xf32>
    %84 = "onnx.Add"(%83, %76) {onnx_node_name = "Add_44"} : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    %85 = "onnx.Relu"(%84) {onnx_node_name = "Relu_45"} : (tensor<*xf32>) -> tensor<*xf32>
    %86 = "onnx.GlobalAveragePool"(%85) {onnx_node_name = "GlobalAveragePool_46"} : (tensor<*xf32>) -> tensor<*xf32>
    %87 = "onnx.Flatten"(%86) {axis = 1 : si64, onnx_node_name = "Flatten_47"} : (tensor<*xf32>) -> tensor<*xf32>
    %88 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<1000x512xf32>} : () -> tensor<1000x512xf32>
    %89 = "onnx.Constant"() {value = dense_resource<__elided__> : tensor<1000xf32>} : () -> tensor<1000xf32>
    %90 = "onnx.Gemm"(%87, %88, %89) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, onnx_node_name = "Gemm_48", transB = 1 : si64} : (tensor<*xf32>, tensor<1000x512xf32>, tensor<1000xf32>) -> tensor<1x1000xf32>
    return %90 : tensor<1x1000xf32>
  }
  "onnx.EntryPoint"() {func = @main_graph} : () -> ()
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment