tucan9389's GitHub Gists
/*
 * Original code of mlir.js in the netron repo:
 * https://github.com/lutzroeder/netron/blob/main/source/mlir.js
 */
if (process.argv.length >= 3) {
    const fs = require('fs');
    function runParser(textContent) {
        // Parses the MLIR text with the reader adapted from netron's mlir.js;
        // the body is cut off in this gist preview.
    }
    // Read the .mlir file passed as the first CLI argument and parse it.
    runParser(fs.readFileSync(process.argv[2], 'utf-8'));
}
import argparse
import os
import random
import subprocess

from tqdm import tqdm


def get_mlir_file_paths(directory):
    # Walk the directory tree and collect every .mlir file path.
    mlir_file_paths = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.mlir'):
                mlir_file_paths.append(os.path.join(root, file))
    return mlir_file_paths
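The preview cuts off before the part of the script that actually runs the parser over these paths. A minimal sketch of that driver loop, assuming the JavaScript entry point above is saved as a hypothetical mlir_parser.js that prints one JSON document per input file:

import json
import subprocess

from tqdm import tqdm


def parse_mlir_files(mlir_file_paths, parser_script="mlir_parser.js"):
    # parser_script is a placeholder name for the Node entry point shown above.
    results = {}
    for path in tqdm(mlir_file_paths):
        proc = subprocess.run(["node", parser_script, path],
                              capture_output=True, text=True)
        if proc.returncode == 0:
            results[path] = json.loads(proc.stdout)
    return results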
{
  "functions": [
    {
      "name": "@test_onnx_conv_simple_pattern",
      "inputs": [
        "%arg0",
        "%arg1"
      ],
      "inputTypes": [
        "tensor<5x3x32x32xf32>",
module {
  func.func @test_onnx_conv_simple_pattern(%arg0: tensor<5x3x32x32xf32>, %arg1: tensor<?x3x2x2xf32>) -> tensor<5x?x31x31xf32> {
    %0 = "onnx.NoValue"() {value} : () -> none
    %1 = "onnx.Conv"(%arg0, %arg1, %0) {auto_pad = "NOTSET", kernel_shape = [2, 2], pads = [0, 0, 0, 0]} : (tensor<5x3x32x32xf32>, tensor<?x3x2x2xf32>, none) -> tensor<5x?x31x31xf32>
    return %1 : tensor<5x?x31x31xf32>
  }
}
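The JSON record above is what gets extracted from this module: one entry per func.func carrying the symbol name, the argument names, and the argument types. As a rough illustration of that mapping (a regex sketch, not the netron-based parser the gists actually use):

import re

FUNC_SIG = r'func\.func\s+(@[\w$.]+)\((.*?)\)\s*->'


def extract_signatures(mlir_text):
    # Builds {"functions": [{"name", "inputs", "inputTypes"}, ...]} from func.func signatures.
    functions = []
    for name, args in re.findall(FUNC_SIG, mlir_text):
        inputs, input_types = [], []
        for arg in (re.split(r',\s*(?=%)', args) if args else []):
            arg_name, arg_type = arg.split(':', 1)
            inputs.append(arg_name.strip())
            input_types.append(arg_type.strip())
        functions.append({"name": name, "inputs": inputs, "inputTypes": input_types})
    return {"functions": functions}

Run over the module above, this sketch yields name "@test_onnx_conv_simple_pattern", inputs ["%arg0", "%arg1"], and inputTypes ["tensor<5x3x32x32xf32>", "tensor<?x3x2x2xf32>"], matching the JSON fragment.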
{
  "functions": [
    {
      "name": "@__inference_predict_3320",
      "inputs": [
        "%arg0",
        "%arg1",
        "%arg2",
        "%arg3",
        "%arg4",
{
  "functions": [
    {
      "name": "@main",
      "inputs": [
        "%image",
        "%weights",
        "%bias"
      ],
      "inputTypes": [
module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 440 : i32}, tf_saved_model.semantics} {
  "tf_saved_model.global_tensor"() {is_mutable, sym_name = "__sm_node4__optimizer.iter", tf_saved_model.exported_names = [], type = tensor<i64>, value = dense<0> : tensor<i64>} : () -> ()
  "tf_saved_model.global_tensor"() {sym_name = "__sm_node6__optimizer.learning_rate", tf_saved_model.exported_names = [], type = tensor<f32>, value = dense<0.00999999977> : tensor<f32>} : () -> ()
  "tf_saved_model.global_tensor"() {is_mutable, sym_name = "__sm_node17__model.conv1.kernel", tf_saved_model.exported_names = [], type = tensor<5x5x1x32xf32>, value = dense<""> : tensor<5x5x1x32xf32>} : () -> ()
  "tf_saved_model.global_tensor"() {is_mutable, sym_name = "__sm_node26__model.conv2.kernel", tf_saved_model.exported_names = [], type = tensor<5x5x32x32xf32>, value = dense<""> : tensor<5x5x32x32xf32>} : () -> ()
  "tf_saved_model.global_tensor"() {is_mutable, sym_name = "__sm_node39__model.de