The IREE ABI natively allows monomorphic functions to be exported where arguments and results are composed of the following types:
- Byte-aligned integer types (i8, i16, i32, i64)
- Floating-point values (f16, f32, f64)
module { | |
func @binary_func(%arg0: tensor<16xf32>, %arg1: tensor<16xf32>) -> (tensor<16xf32>, tensor<16xf32>) attributes {iree.abi = "{\22a\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22r\22:[[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]],\22v\22:1}", iree.module.export} { | |
return %arg0, %arg1 : tensor<16xf32>, tensor<16xf32> | |
} | |
func @dict_nest(%arg0: !iree.list<?>, %arg1: tensor<f32>) -> !iree.list<?> attributes {iree.abi = "{\22a\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22slist\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]],[\22ndarray\22,\22f32\22,0]],\22r\22:[[\22sdict\22,[\22dict\22,[\22sdict\22,[\22a\22,[\22ndarray\22,\22f32\22,1,16]],[\22b\22,[\22ndarray\22,\22f32\22,1,16]]]],[\22list\22,[\22slist\22,[\22ndarray\22,\22f32\22,1,16],[\22ndarray\22,\22f32\22,1,16]]]]],\22v\22:1}", iree.module.export} { | |
%c2 = constant 2 : index | |
%c0 = |
iree_tf_compiler/TF/test/saved_model_to_iree_abi.mlir:19:3: error: list contains 'none' and cannot be accessed as '!iree.list<none>' | |
func @__inference_dict_nest_190(%arg0: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "a"]}, %arg1: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "dict", "b"]}, %arg2: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 0]}, %arg3: tensor<16xf32> {tf._user_specified_name = "mapping", tf_saved_model.index_path = [0, "list", 1]}, %arg4: tensor<f32> {tf._user_specified_name = "scalar", tf_saved_model.index_path = [1]}) -> (tensor<16xf32> {tf_saved_model.index_path = ["dict", "a"]}, tensor<16xf32> {tf_saved_model.index_path = ["dict", "b"]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 0]}, tensor<16xf32> {tf_saved_model.index_path = ["list", 1]}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf.shape<16>, #tf.shape<16 |
import ctypes
# See here for the types of ABIs ctypes can dispatch to:
# https://docs.python.org/3/library/ctypes.html#ctypes.CFUNCTYPE
# Resolve the raw function pointer for the JIT-compiled symbol "foobar".
# NOTE(review): get_function_voidstar_from_jit is defined elsewhere in the
# project; presumably it returns the function's address as an int.
func_address = get_function_voidstar_from_jit("foobar")
# Create a callable for the void* function at func_address, taking a
# void* and returning an int.
# Fix: the correct ctypes name is c_void_p (c_voidp does not exist and
# raises AttributeError).
my_compiled_func = ctypes.CFUNCTYPE(ctypes.c_uint32, ctypes.c_void_p).prototype(func_address)
---- REAL GENERATION HERE ---- | |
//===----------------------------------------------------------------------===// | |
// Op definition for MatmulOp | |
//===----------------------------------------------------------------------===// | |
def MatmulOp : LinalgStructuredBase_Op<"matmul", [ | |
AttrSizedOperandSegments, | |
DeclareOpInterfaceMethods<MemoryEffectsOpInterface>, | |
SingleBlockImplicitTerminator<"YieldOp"> |
File sizes on Linux x64 Release mode (not bundling into a monolithic libLLVM.so): | |
`lib/`: | |
``` | |
28K lib/BugpointPasses.so 28K lib/Bye.so 16K lib/LLVMHello.so 8.0K lib/libEngine.so 116K lib/libLLVMAggressiveInstCombine.so 7.0M lib/libLLVMAnal |
# Autogenerated by mlir-tblgen; don't manually edit. | |
import array as _ods_array | |
from . import _cext as _ods_cext | |
from . import _segmented_accessor as _ods_segmented_accessor, _equally_sized_accessor as _ods_equally_sized_accessor, _get_default_loc_context as _ods_get_default_loc_context | |
_ods_ir = _ods_cext.ir | |
@_ods_cext.register_dialect | |
class _Dialect(_ods_ir.Dialect): |
Registered operations: | |
SimpleOp('aten::abs'[1] -> aten.abs, operands=[(0, TensorValue('input'))], results=[(0, TensorValue('result'))]) | |
SimpleOp('aten::abs'[2] -> aten.abs_outref, operands=[(0, TensorValue('input')), (1, TensorOutRef('out'))], results=[(0, TensorOutRef('result'))]) | |
SimpleOp('aten::acos'[1] -> aten.acos, operands=[(0, TensorValue('input'))], results=[(0, TensorValue('result'))]) | |
SimpleOp('aten::acos'[2] -> aten.acos_outref, operands=[(0, TensorValue('input')), (1, TensorOutRef('out'))], results=[(0, TensorOutRef('result'))]) | |
SimpleOp('aten::angle'[1] -> aten.angle, operands=[(0, TensorValue('input'))], results=[(0, TensorValue('result'))]) | |
SimpleOp('aten::angle'[2] -> aten.angle_outref, operands=[(0, TensorValue('input')), (1, TensorOutRef('out'))], results=[(0, TensorOutRef('result'))]) | |
SimpleOp('aten::asin'[1] -> aten.asin, operands=[(0, TensorValue('input'))], results=[(0, TensorValue('result'))]) | |
SimpleOp('aten::asin'[2] -> aten.asin_outref, operands=[(0, TensorValue('input |
README updated with build instructions, etc. Some notes on the design and layering follow.
-DTorch_DIR=.../libtorch/share/cmake/Torch
python3 -m pip install lark
#include "nnpack.h" (appears only in aten_ops.cpp)
-DCMAKE_CXX_FLAGS=-I$HOME/bin/NNPACK/include
Today I implemented enough boilerplate to parse and import a (very) simple python function to a corresponding MLIR representation.
As an example:
from npcomp.compiler.frontend import *
def binary_expression():
a = 1