@Abhishek-Varma
Created January 31, 2024 10:41
Log of a failure at `iree-hal-serialize-target-executables`
This file has been truncated.
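The dumps below trace the IREE compilation pipeline for a small static matmul aimed at the amd-aie backend; each section is printed right before the named pass runs. The exact command the author used is not recorded in this gist, but as a rough, hypothetical sketch of how such a per-pass dump is usually produced, something like the following Python snippet (assuming an IREE build with the amd-aie plugin registered; the variable names and the use of the iree.compiler Python API rather than the iree-compile CLI are illustrative assumptions) would emit "IR Dump Before ..." sections like those in this log:

# Hypothetical reproduction sketch, not the author's actual command.
# Compiles the matmul for the amd-aie backend and asks MLIR to print the
# IR before every pass; the dumps go to stderr.
import iree.compiler as ireec

MATMUL_MLIR = """
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
  %0 = tensor.empty() : tensor<8x32xf32>
  %cst = arith.constant 0.000000e+00 : f32
  %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
  %2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>)
                     outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
  return %2 : tensor<8x32xf32>
}
"""

vmfb = ireec.compile_str(
    MATMUL_MLIR,
    target_backends=["amd-aie"],               # assumption: amd-aie plugin is built in
    extra_args=["--mlir-print-ir-before-all"], # per-pass IR dumps, as seen below
)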
// -----// IR Dump Before AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
module {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before AutoInputConversionPipeline (iree-auto-input-conversion) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before IREEImportPublic (iree-import-public) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before ImportMLProgram (iree-import-ml-program) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before SanitizeModuleNames (iree-sanitize-module-names) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before Inliner (inline) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = call @_matmul_static(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
func.func private @_matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func private @_matmul_static(%arg0: tensor<8x16xf32>, %arg1: tensor<16x32xf32>) -> tensor<8x32xf32> {
%0 = tensor.empty() : tensor<8x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<8x32xf32>) -> tensor<8x32xf32>
%2 = linalg.matmul ins(%arg0, %arg1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%1 : tensor<8x32xf32>) -> tensor<8x32xf32>
return %2 : tensor<8x32xf32>
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = call @_matmul_static(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%cst = arith.constant 0.000000e+00 : f32
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before DemoteF64ToF32 (iree-util-demote-f64-to-f32) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before RemoveZeroExtentTensors (iree-global-opt-remove-zero-extent-tensors) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before DetachElementwiseFromNamedOps (iree-global-opt-detach-elementwise-from-named-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before LinalgNamedOpConversion (linalg-named-op-conversion) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Convert1X1FilterConv2DToMatmul (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before EraseUnusedLinalgOperands (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before ExpandTensorShapes (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before ConvertElementwiseToLinalg (convert-elementwise-to-linalg) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before DecomposeConcat (iree-global-opt-decompose-concat) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FoldUnitExtentDims (iree-flow-fold-unit-extent-dims) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FuseDequantizationMatmul (iree-global-opt-fuse-dequantization-matmul) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before LiftGenericToTransposeBatchMatmul (iree-global-opt-lift-generic-to-tranpose-batch-matmul) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before SetEncoding (iree-global-opt-set-encoding) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before MaterializeHomogeneousEncodings (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#map3 = affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>
#map4 = affine_map<()[s0] -> ((8 ceildiv s0) * s0)>
#map5 = affine_map<()[s0] -> ((32 ceildiv s0) * s0)>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2:2 = iree_linalg_ext.upper_bound_tile_size tensor<8x16xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>> -> index, index
%3 = affine.apply #map3()[%2#0, %c8]
%4 = affine.apply #map3()[%2#1, %c16]
%padded = tensor.pad %0 low[0, 0] high[%3, %4] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<8x16xf32> to tensor<?x?xf32>
%5 = iree_linalg_ext.set_encoding %padded : tensor<?x?xf32> -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], original_type = tensor<8x16xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>
%6:2 = iree_linalg_ext.upper_bound_tile_size tensor<16x32xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>> -> index, index
%7 = affine.apply #map3()[%6#0, %c16]
%8 = affine.apply #map3()[%6#1, %c32]
%padded_0 = tensor.pad %1 low[0, 0] high[%7, %8] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<16x32xf32> to tensor<?x?xf32>
%9 = iree_linalg_ext.set_encoding %padded_0 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], original_type = tensor<16x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>
%10:2 = iree_linalg_ext.upper_bound_tile_size tensor<8x32xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>> -> index, index
%11 = affine.apply #map4()[%10#0]
%12 = affine.apply #map5()[%10#1]
%13 = tensor.empty(%11, %12) : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>) -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>
%15 = linalg.matmul ins(%5, %9 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], original_type = tensor<8x16xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>, tensor<?x?xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], original_type = tensor<16x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>) outs(%14 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>) -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>>
%16 = iree_linalg_ext.unset_encoding %15 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [#map, #map1, #map2]>> -> tensor<?x?xf32>
%extracted_slice = tensor.extract_slice %16[0, 0] [8, 32] [1, 1] : tensor<?x?xf32> to tensor<8x32xf32>
%17 = hal.tensor.export %extracted_slice "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %17 : !hal.buffer_view
}
}
// -----// IR Dump Before MaterializeEncodingIntoNop (iree-codegen-materialize-encoding-into-nop) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2:2 = iree_linalg_ext.upper_bound_tile_size tensor<8x16xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>> -> index, index
%3 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%2#0, %c8]
%4 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%2#1, %c16]
%padded = tensor.pad %0 low[0, 0] high[%3, %4] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<8x16xf32> to tensor<?x?xf32>
%5 = iree_linalg_ext.set_encoding %padded : tensor<?x?xf32> -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], original_type = tensor<8x16xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>
%6:2 = iree_linalg_ext.upper_bound_tile_size tensor<16x32xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>> -> index, index
%7 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%6#0, %c16]
%8 = affine.apply affine_map<()[s0, s1] -> (-s1 + (s1 ceildiv s0) * s0)>()[%6#1, %c32]
%padded_0 = tensor.pad %1 low[0, 0] high[%7, %8] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<16x32xf32> to tensor<?x?xf32>
%9 = iree_linalg_ext.set_encoding %padded_0 : tensor<?x?xf32> -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], original_type = tensor<16x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>
%10:2 = iree_linalg_ext.upper_bound_tile_size tensor<8x32xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>> -> index, index
%11 = affine.apply affine_map<()[s0] -> ((8 ceildiv s0) * s0)>()[%10#0]
%12 = affine.apply affine_map<()[s0] -> ((32 ceildiv s0) * s0)>()[%10#1]
%13 = tensor.empty(%11, %12) : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>) -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>
%15 = linalg.matmul ins(%5, %9 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = LHS, element_types = [f32, f32, f32], original_type = tensor<8x16xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>, tensor<?x?xf32, #iree_linalg_ext.encoding<role = RHS, element_types = [f32, f32, f32], original_type = tensor<16x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>) outs(%14 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>) -> tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>>
%16 = iree_linalg_ext.unset_encoding %15 : tensor<?x?xf32, #iree_linalg_ext.encoding<role = RESULT, element_types = [f32, f32, f32], original_type = tensor<8x32xf32>, matmul_narrow_M = 8 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>> -> tensor<?x?xf32>
%extracted_slice = tensor.extract_slice %16[0, 0] [8, 32] [1, 1] : tensor<?x?xf32> to tensor<8x32xf32>
%17 = hal.tensor.export %extracted_slice "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %17 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%padded = tensor.pad %0 low[0, 0] high[%c0, %c0] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<8x16xf32> to tensor<?x?xf32>
%padded_0 = tensor.pad %1 low[0, 0] high[%c0, %c0] {
^bb0(%arg2: index, %arg3: index):
tensor.yield %cst : f32
} : tensor<16x32xf32> to tensor<?x?xf32>
%2 = tensor.empty(%c8, %c32) : tensor<?x?xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?xf32>) -> tensor<?x?xf32>
%4 = linalg.matmul ins(%padded, %padded_0 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) -> tensor<?x?xf32>
%extracted_slice = tensor.extract_slice %4[0, 0] [8, 32] [1, 1] : tensor<?x?xf32> to tensor<8x32xf32>
%5 = hal.tensor.export %extracted_slice "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before SimplifyPackUnpack (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before GeneralizeLinalgNamedOps (iree-global-opt-generalize-linalg-named-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before JitGlobals (iree-consteval-jit-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before RaiseSpecialOps (iree-global-opt-raise-special-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before VerifyInputLegality (iree-verify-input-legality) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before TensorPadToTensorInsertSlice (iree-flow-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
}
// -----// IR Dump Before InterchangeGenericOps (iree-flow-interchange-generic-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FusionOfTensorOps (iree-flow-fusion-of-tensor-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before SplitReduction (iree-flow-split-reduction-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before InterchangeGenericOps (iree-flow-interchange-generic-ops) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FormScalarDispatches (iree-flow-form-scalar-dispatches) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FormDispatchRegions (iree-flow-form-dispatch-regions) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CloneProducersIntoDispatchRegions (iree-flow-clone-producers-into-dispatch-regions) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = flow.dispatch.region -> (tensor<8x32xf32>) {
%6 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.return %6 : tensor<8x32xf32>
}
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CollapseDimensions (iree-flow-collapse-dimensions) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = flow.dispatch.region -> (tensor<8x32xf32>) {
%6 = tensor.empty() : tensor<8x32xf32>
%cst_0 = arith.constant 0.000000e+00 : f32
%7 = linalg.fill ins(%cst_0 : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.return %8 : tensor<8x32xf32>
}
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before FormDispatchWorkgroups (iree-flow-form-dispatch-workgroups) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%cst = arith.constant 0.000000e+00 : f32
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = flow.dispatch.region -> (tensor<8x32xf32>) {
%6 = tensor.empty() : tensor<8x32xf32>
%cst_0 = arith.constant 0.000000e+00 : f32
%7 = linalg.fill ins(%cst_0 : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.return %8 : tensor<8x32xf32>
}
%5 = hal.tensor.export %4 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %5 : !hal.buffer_view
}
// -----// IR Dump Before CaptureDispatchDynamicDims (iree-flow-capture-dispatch-dynamic-dims) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before InitializeEmptyTensors (iree-flow-initialize-empty-tensors) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before OutlineDispatchExterns (iree-flow-outline-dispatch-externs) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before OutlineDispatchRegions (iree-flow-outline-dispatch-regions) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch.workgroups(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32> =
(%arg2: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg3: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%6 = tensor.empty() : tensor<8x32xf32>
%7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = linalg.matmul ins(%4, %5 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%7 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
flow.return
} count() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before AnnotateDispatches (iree-flow-annotate-dispatches) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before DeduplicateExecutables (iree-flow-deduplicate-executables) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before CSE (cse) //----- //
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before CleanupTensorShapes (iree-flow-cleanup-tensor-shapes) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyInputPass (iree-stream-verify-input) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before OutlineConstants (iree-util-outline-constants) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before ConvertToStreamPass (iree-stream-conversion) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
flow.executable private @matmul_static_dispatch_0 {
flow.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
flow.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !flow.dispatch.tensor<readonly:tensor<8x16xf32>>, %arg1: !flow.dispatch.tensor<readonly:tensor<16x32xf32>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>) {
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%2 = tensor.empty() : tensor<8x32xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<8x32xf32>) -> tensor<8x32xf32>
%4 = linalg.matmul ins(%0, %1 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%3 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<8x16xf32>
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<16x32xf32>
%2 = flow.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0, %1) : (tensor<8x16xf32>, tensor<16x32xf32>) -> tensor<8x32xf32>
%3 = hal.tensor.export %2 "output0" : tensor<8x32xf32> -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%cst = arith.constant 0.000000e+00 : f32
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c16_2 = arith.constant 16 : index
%c32 = arith.constant 32 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16_2, %c32]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%c0 = arith.constant 0 : index
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
%c16_2 = arith.constant 16 : index
%c32 = arith.constant 32 : index
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16_2, %c32]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%c0 = arith.constant 0 : index
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
%element_type_f32_0 = hal.element_type<f32> : i32
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32_0) encoding(%dense_row_major_1)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%cst = arith.constant 0.000000e+00 : f32
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before CombineInitializers (iree-util-combine-initializers) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
}
// -----// IR Dump Before EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- //
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.sizeof tensor<8x16xf32> : index
%1 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%0}
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} -> !stream.resource<*>{%0}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%3 = stream.tensor.sizeof tensor<16x32xf32> : index
%4 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%3}
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} -> !stream.resource<*>{%3}
%6 = stream.tensor.sizeof tensor<8x32xf32> : index
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6}
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} -> !stream.resource<external>{%6}
%9 = stream.tensor.export %8 : tensor<8x32xf32> in !stream.resource<external>{%6} -> !hal.buffer_view
return %9 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
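For reference, the dispatch body above is a plain f32 matmul into a zero-filled 8x32 accumulator. A minimal numpy sketch of the same computation, assuming dense row-major inputs as checked by the hal.buffer_view.assert ops (the function and variable names below are illustrative, not part of the IR):

import numpy as np

def matmul_static(input0: np.ndarray, input1: np.ndarray) -> np.ndarray:
    # input0: (8, 16) f32, input1: (16, 32) f32 -- the shapes asserted on the buffer views
    assert input0.shape == (8, 16) and input1.shape == (16, 32)
    acc = np.zeros((8, 32), dtype=np.float32)                            # linalg.fill with 0.0
    acc = acc + input0.astype(np.float32) @ input1.astype(np.float32)    # linalg.matmul accumulates into the fill
    return acc                                                           # exported as the 8x32 (1024-byte) buffer_view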
// -----// IR Dump Before MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before RefineUsagePass (iree-stream-refine-usage) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
%1 = stream.async.transfer %0 : !stream.resource<external>{%c512} -> !stream.resource<*>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%2 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%3 = stream.async.transfer %2 : !stream.resource<external>{%c2048} -> !stream.resource<*>{%c2048}
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%1[%c0 to %c512 for %c512], %3[%c0 to %c2048 for %c2048]) : (!stream.resource<*>{%c512}, !stream.resource<*>{%c2048}) -> !stream.resource<*>{%c1024}
%5 = stream.async.transfer %4 : !stream.resource<*>{%c1024} -> !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before ScheduleExecutionPass (iree-stream-schedule-execution) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%0[%c0 to %c512 for %c512], %1[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
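At this point ScheduleExecutionPass has wrapped the dispatch in a stream.async.execute region whose completion is signalled by a !stream.timepoint, and stream.timepoint.await blocks on that timepoint before the result is exported. As a loose host-side analogy only (this is not the IREE runtime API), the pattern resembles submitting work and waiting on a future; the names below are hypothetical stand-ins:

import numpy as np
from concurrent.futures import ThreadPoolExecutor

def dispatch_matmul(a, b):
    # stands in for the stream.async.execute region containing the single dispatch
    return np.zeros((8, 32), np.float32) + a @ b

a = np.ones((8, 16), np.float32)
b = np.ones((16, 32), np.float32)
with ThreadPoolExecutor() as pool:
    timepoint = pool.submit(dispatch_matmul, a, b)   # analogue of ... => !stream.timepoint
    result = timepoint.result()                      # analogue of stream.timepoint.await %result_timepoint => %results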
// -----// IR Dump Before PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.timepoint.immediate => !stream.timepoint
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint
%results, %result_timepoint = stream.async.execute await(%4) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %7 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%2 = stream.timepoint.immediate => !stream.timepoint
%3 = stream.timepoint.immediate => !stream.timepoint
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint
%results, %result_timepoint = stream.async.execute await(%4) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%7 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %7 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%6 = stream.tensor.export %5 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %6 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%results, %result_timepoint = stream.async.execute with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024} {
%4 = stream.async.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%arg2[%c0 to %c512 for %c512], %arg3[%c0 to %c2048 for %c2048]) : (!stream.resource<external>{%c512}, !stream.resource<external>{%c2048}) -> !stream.resource<external>{%c1024}
stream.yield %4 : !stream.resource<external>{%c1024}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c1024}
%3 = stream.tensor.export %2 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %3 : !hal.buffer_view
}
}
// -----// IR Dump Before PackConstantsPass (iree-stream-pack-constants) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before LayoutSlicesPass (iree-stream-layout-slices) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before PropagateSubranges (iree-util-propagate-subranges) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
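The dispatch above is a plain zero-initialized matmul, and the byte counts the host function threads through (%c512, %c2048, %c1024) are just the f32 footprints of the three tensors. A minimal NumPy sketch (my own illustration, not part of the log) that checks the arithmetic and mirrors the linalg.fill + linalg.matmul pair:

import numpy as np

# Shapes from the dispatch: A is 8x16, B is 16x32, C is 8x32, all f32.
a = np.zeros((8, 16), dtype=np.float32)
b = np.zeros((16, 32), dtype=np.float32)

# linalg.fill with 0.0 followed by linalg.matmul is a zero-initialized matmul.
c = np.zeros((8, 32), dtype=np.float32)
c += a @ b

# Byte sizes matching %c512, %c2048 and %c1024 in the host function.
assert a.nbytes == 512 and b.nbytes == 2048 and c.nbytes == 1024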
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
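The host wrapper is asynchronous at the stream level: the result allocation yields a timepoint, stream.cmd.execute awaits it and yields another, and stream.timepoint.await blocks before the buffer view is exported. A runnable sketch of that chaining with Python futures, assuming f32 NumPy inputs of the asserted shapes; this is purely illustrative, the real runtime drives HAL semaphores rather than Python threads:

import numpy as np
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=1)

def matmul_static(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # stream.resource.alloca ... => !stream.timepoint: the allocation signals readiness.
    alloc = pool.submit(np.empty, (8, 32), np.float32)
    # stream.cmd.execute await(%result_timepoint): run once the allocation is ready.
    def dispatch():
        out = alloc.result()      # wait on the allocation timepoint
        out[...] = a @ b          # the single recorded matmul dispatch
        return out
    done = pool.submit(dispatch)
    # stream.timepoint.await %2 => %result: block before exporting the buffer view.
    return done.result()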
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
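SimplifyGlobalAccesses, FoldGlobals and FuseGlobals all operate on util.global ops (deduplicating loads and stores, folding immutable globals into their uses, merging globals with identical initializers). This module declares no globals, which is why the dumps around these passes are identical. A toy Python sketch of the fusing idea, not IREE code: immutable "globals" sharing an initializer collapse to one name and references are rewritten:

# Conceptual only: fuse globals with identical constant initializers.
globals_ = {"g0": 3.14, "g1": 3.14, "g2": 2.71}
refs = ["g0", "g1", "g2", "g1"]

canonical = {}
replacement = {}
for name, value in globals_.items():
    replacement[name] = canonical.setdefault(value, name)   # first name wins

fused_refs = [replacement[r] for r in refs]   # ['g0', 'g0', 'g2', 'g0']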
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
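iree-util-ipo specializes functions across internal call edges, for example by folding arguments that are the same constant at every call site and dropping unused parameters. Here the only function is the public ABI wrapper and the dispatch lives in a separate executable, so there is nothing to specialize and the dump is unchanged. A toy sketch of the idea in Python, not the actual pass:

# Conceptual only: every call site passes scale=10, so the argument can be
# folded into the callee and removed from the signature.
def callee(x, scale):
    return x * scale

calls = [(2, 10), (3, 10), (4, 10)]
assert len({scale for _, scale in calls}) == 1   # uniform constant argument

def callee_after_ipo(x):                          # specialized, parameter dropped
    return x * 10

assert [callee(x, s) for x, s in calls] == [callee_after_ipo(x) for x, _ in calls]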
// -----// IR Dump Before SCFToControlFlow (convert-scf-to-cf) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
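convert-scf-to-cf rewrites structured control flow (scf.for, scf.if, scf.while) into explicit blocks and branches; the wrapper above contains no scf ops, so it passes through untouched. A conceptual Python sketch of the same rewrite, a structured loop versus its branch-style equivalent with an explicit induction variable:

# Structured form (scf.for-like):
total = 0
for i in range(0, 8):
    total += i

# Branch form after lowering (cf.cond_br-like): explicit induction variable,
# loop-carried value, and a conditional jump back to the loop header.
i, acc = 0, 0
while True:
    if not (i < 8):        # cf.cond_br on the loop condition
        break
    acc += i               # loop body
    i += 1                 # step, then branch back to the header
assert acc == total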
// -----// IR Dump Before FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
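iree-util-fixed-point-iterator wraps the cleanup sub-pipeline whose dumps follow (Canonicalizer, CSE, the globals passes) and reruns it until the IR stops changing; the iteration count surfaces as the module attribute iree.fixedpoint.iteration = 0 : index in the later dumps. A minimal sketch of such a driver, assuming a caller-supplied run_pipeline(ir) that returns the transformed IR (a hypothetical stand-in, not the pass's real interface):

def iterate_to_fixed_point(ir, run_pipeline, max_iterations=8):
    # Rerun the sub-pipeline until nothing changes (or we hit the iteration cap).
    for iteration in range(max_iterations):
        new_ir = run_pipeline(ir)
        if new_ir == ir:           # converged: this round made no changes
            return new_ir, iteration
        ir = new_ir
    return ir, max_iterations

# Toy usage: a "pipeline" that squeezes whitespace until the text is stable.
text = "a    b     c"
result, iters = iterate_to_fixed_point(text, lambda s: s.replace("  ", " "))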
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before ElideTimepointsPass (iree-stream-elide-timepoints) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie], iree.fixedpoint.iteration = 0 : index} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0, %c0, %c0 : index, index, index) {
ro %arg2[%c0_0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0_0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0, %c0, %c0 : index, index, index) {
ro %arg2[%c0_0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0_0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%c0_0 = arith.constant 0 : index
%c0_i64 = arith.constant 0 : i64
%c0_i32 = arith.constant 0 : i32
%c32_i64 = arith.constant 32 : i64
%c0_i64_1 = arith.constant 0 : i64
%c0_i32_2 = arith.constant 0 : i32
%c0_i64_3 = arith.constant 0 : i64
%c0_i32_4 = arith.constant 0 : i32
%c32_i64_5 = arith.constant 32 : i64
%c0_i64_6 = arith.constant 0 : i64
%c0_i32_7 = arith.constant 0 : i32
%c0_i64_8 = arith.constant 0 : i64
%c0_i32_9 = arith.constant 0 : i32
%c32_i64_10 = arith.constant 32 : i64
%c0_i64_11 = arith.constant 0 : i64
%c0_i32_12 = arith.constant 0 : i32
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %c0_i32_9, %c0_i32_12 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0_0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0_0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0_0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%c32_i64 = arith.constant 32 : i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%c32_i64_0 = arith.constant 32 : i64
%7 = arith.shli %6, %c32_i64_0 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%c32_i64_1 = arith.constant 32 : i64
%12 = arith.shli %11, %c32_i64_1 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
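The dump above reflects what iree-stream-pack-dispatch-operands did to the dispatch ABI: each index operand at the stream.cmd.dispatch site was split into a low/high pair of i32 operands, and the dispatch body rebuilds the original 64-bit value with the arith.extui / arith.shli / arith.ori sequence before arith.index_castui turns it back into an index. The following is only a minimal Python sketch of that reconstruction arithmetic (illustrative names and helpers, not IREE code):

# Toy model of the i32 packing/unpacking emitted by the pass.
def pack_index(offset: int) -> tuple[int, int]:
    # Call site side: split a 64-bit offset into two i32 words.
    lo = offset & 0xFFFFFFFF
    hi = (offset >> 32) & 0xFFFFFFFF
    return lo, hi

def unpack_index(lo: int, hi: int) -> int:
    # Dispatch body side: mirrors extui(lo/hi), shli(hi, 32), ori, index_castui.
    return (hi << 32) | lo

assert unpack_index(*pack_index(0)) == 0              # all offsets in this log are 0
assert unpack_index(*pack_index(2**40 + 5)) == 2**40 + 5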
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) {
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %arg3 : i32 to i64
%1 = arith.extui %arg4 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %arg5 : i32 to i64
%6 = arith.extui %arg6 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %arg7 : i32 to i64
%11 = arith.extui %arg8 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
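In the dumps that follow, iree-stream-fold-uniform-operands (plus canonicalization and CSE) removes the six i32 operands from the stream.cmd.dispatch site, since the single call site always passes the constant 0, and the callee materializes that constant locally instead. A small toy sketch of that folding idea in Python (assumed data model for illustration, not IREE's implementation):

# Operands that are identical across every dispatch site are dropped from the
# call and become constants inside the callee.
def fold_uniform_operands(call_sites: list[list[int]]) -> tuple[list[int], dict[int, int]]:
    # call_sites: one operand list per dispatch site (all the same length).
    num_operands = len(call_sites[0])
    folded: dict[int, int] = {}
    for i in range(num_operands):
        values = {site[i] for site in call_sites}
        if len(values) == 1:                 # uniform across every dispatch site
            folded[i] = values.pop()         # turned into a constant in the callee
    keep = [i for i in range(num_operands) if i not in folded]
    return keep, folded

keep, folded = fold_uniform_operands([[0, 0, 0, 0, 0, 0]])
# keep == []  -> the dispatch site carries no operands afterwards
# folded == {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}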
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c0_i32 = arith.constant 0 : i32
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
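// Note: iree-util-apply-patterns runs the Util dialect's module-level folding and cleanup patterns; from here on the dumps show the full module, including the stream.executable that wraps the dispatch.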
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%c0_i32 = arith.constant 0 : i32
%cst = arith.constant 0.000000e+00 : f32
%c32_i64 = arith.constant 32 : i64
%0 = arith.extui %c0_i32 : i32 to i64
%1 = arith.extui %c0_i32 : i32 to i64
%2 = arith.shli %1, %c32_i64 : i64
%3 = arith.ori %0, %2 : i64
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index
%5 = arith.extui %c0_i32 : i32 to i64
%6 = arith.extui %c0_i32 : i32 to i64
%7 = arith.shli %6, %c32_i64 : i64
%8 = arith.ori %5, %7 : i64
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index
%10 = arith.extui %c0_i32 : i32 to i64
%11 = arith.extui %c0_i32 : i32 to i64
%12 = arith.shli %11, %c32_i64 : i64
%13 = arith.ori %10, %12 : i64
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%20 = tensor.empty() : tensor<8x32xf32>
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<8x32xf32>) -> tensor<8x32xf32>
%22 = linalg.matmul ins(%18, %19 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%21 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %22, %17, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
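// Note: iree-util-fold-globals folds util.global values (e.g. constant-initialized or immutable globals) into their uses; with no globals in this module it changes nothing.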
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
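// Note: iree-util-fuse-globals merges util.global values that always carry the same contents; again a no-op for this module.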
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
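// Note: iree-util-ipo applies interprocedural optimizations such as propagating constant call operands and dropping unused arguments/results; with a single public entry point and one dispatch there is nothing to rewrite.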
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- //
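// Note: symbol-dce deletes symbols that are not reachable from public symbols; the private @matmul_static_dispatch_0 executable is referenced by stream.cmd.dispatch and is therefore kept.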
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
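// Note: canonicalize applies each op's registered canonicalization patterns and constant folds; the IR is already in canonical form at this point.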
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before AssignTargetDevicesPass (iree-hal-assign-target-devices) //----- //
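// Note: iree-hal-assign-target-devices populates the module's hal.device.targets attribute from the compile options; it is already set to #device_target_amd_aie here, so the module is left as-is.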
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyTargetEnvironmentPass (iree-hal-verify-target-environment) //----- //
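// Note: iree-hal-verify-target-environment checks that device targets are assigned and that a registered backend exists for each #hal.executable.target (here amd-aie producing amdaie-xclbin-fb executables).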
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- //
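// Note: iree-hal-materialize-interfaces lowers stream.executable ops into hal.executable ops, creating one hal.executable.variant per #hal.executable.target and an explicit pipeline layout for the three bindings used by the dispatch.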
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
stream.executable private @matmul_static_dispatch_0 {
stream.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
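// Note on the constants in @matmul_static above: %c512, %c2048 and %c1024 are the external
// resource sizes in bytes for the three f32 tensors (8x16 input0, 16x32 input1, 8x32 result).
// A quick sanity check of that arithmetic (plain Python, not part of the compiler output):

def f32_bytes(shape, bytes_per_elem=4):
    # Product of the dimensions times 4 bytes per f32 element.
    n = 1
    for d in shape:
        n *= d
    return n * bytes_per_elem

assert f32_bytes((8, 16)) == 512    # %c512  -> input0
assert f32_bytes((16, 32)) == 2048  # %c2048 -> input1
assert f32_bytes((8, 32)) == 1024   # %c1024 -> result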
// -----// IR Dump Before DumpExecutableSourcesPass (iree-hal-dump-executable-sources) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#pipeline_layout = #hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
hal.executable private @matmul_static_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@amdaie_xclbin_fb::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
} attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before ConfigureExecutablesPass (iree-hal-configure-executables) //----- //
hal.executable private @matmul_static_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
}
// -----// IR Dump Before ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before CPUMaterializeUpperBoundTileSize (iree-codegen-cpu-materialize-upper-bound-tile-size) //----- //
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@amdaie_xclbin_fb::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
} attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
// -----// IR Dump Before DumpExecutableSourcesPass (iree-hal-dump-executable-sources) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#pipeline_layout = #hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
hal.executable private @matmul_static_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@amdaie_xclbin_fb::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
} attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before DumpExecutableBenchmarksPass (iree-hal-dump-executable-benchmarks) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>
#pipeline_layout = #hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>
#device_target_amd_aie = #hal.device.target<"amd-aie", {executable_targets = [#executable_target_amdaie_xclbin_fb], legacy_sync}>
module attributes {hal.device.targets = [#device_target_amd_aie]} {
hal.executable private @matmul_static_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
}
func.func @matmul_static(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @matmul_static(%input0: tensor<8x16xf32>, %input1: tensor<16x32xf32>) -> (%output0: tensor<8x32xf32>)"}} {
%c1024 = arith.constant 1024 : index
%c2048 = arith.constant 2048 : index
%c512 = arith.constant 512 : index
%c0 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c16 = arith.constant 16 : index
%c8 = arith.constant 8 : index
%element_type_f32 = hal.element_type<f32> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c8, %c16]) type(%element_type_f32) encoding(%dense_row_major)
%0 = stream.tensor.import %arg0 : !hal.buffer_view -> tensor<8x16xf32> in !stream.resource<external>{%c512}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c16, %c32]) type(%element_type_f32) encoding(%dense_row_major)
%1 = stream.tensor.import %arg1 : !hal.buffer_view -> tensor<16x32xf32> in !stream.resource<external>{%c2048}
%result, %result_timepoint = stream.resource.alloca uninitialized : !stream.resource<external>{%c1024} => !stream.timepoint
%2 = stream.cmd.execute await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c512}, %1 as %arg3: !stream.resource<external>{%c2048}, %result as %arg4: !stream.resource<external>{%c1024}) {
stream.cmd.dispatch @matmul_static_dispatch_0::@amdaie_xclbin_fb::@matmul_static_dispatch_0_matmul_8x32x16_f32 {
ro %arg2[%c0 for %c512] : !stream.resource<external>{%c512},
ro %arg3[%c0 for %c2048] : !stream.resource<external>{%c2048},
wo %arg4[%c0 for %c1024] : !stream.resource<external>{%c1024}
} attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c1024}
%4 = stream.tensor.export %3 : tensor<8x32xf32> in !stream.resource<external>{%c1024} -> !hal.buffer_view
return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before TranslateExecutablesPass (iree-hal-translate-executables) //----- //
hal.executable private @matmul_static_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
}
// -----// IR Dump Before TranslateTargetExecutableVariantsPass (iree-hal-translate-target-executable-variants) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before TypePropagation (iree-codegen-type-propagation) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before BubbleUpOrdinalOps (iree-codegen-bubble-up-ordinal-ops) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before BufferizeCopyOnlyDispatches (iree-codegen-bufferize-copy-only-dispatches) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before DecomposeSoftmax (iree-codegen-decompose-softmax) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before MaterializeUserConfigs (iree-codegen-materialize-user-configs) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before AMDAIELoweringStrategy (iree-amdaie-lowering-strategy) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before AMDAIELowerExecutableTarget (iree-amdaie-lower-executable-target) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) attributes {translation_info = #iree_codegen.translation_info<None>} {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<8x32xf32>) -> tensor<8x32xf32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%3, %4 : tensor<8x16xf32>, tensor<16x32xf32>) outs(%6 : tensor<8x32xf32>) -> tensor<8x32xf32>
%8 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%9 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%extracted_slice_2 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%10 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%extracted_slice, %extracted_slice_0 : tensor<8x16xf32>, tensor<16x16xf32>) outs(%9 : tensor<8x16xf32>) -> tensor<8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %10 into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %8, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
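// AMDAIETileAndFuse above materializes the first level of the lowering_config tile sizes
// ([8, 16]) as an scf.forall over the 8x32 result, i.e. a 1x2 grid of workgroup tiles mapped
// to #gpu.block<y>/#gpu.block<x>. A small sketch of that grid arithmetic (hypothetical helper,
// for illustration only, not part of the pass):

import math

def workgroup_grid(out_shape, tile_sizes):
    # Number of tiles per dimension = ceil(extent / tile size).
    return tuple(math.ceil(d / t) for d, t in zip(out_shape, tile_sizes))

# 8x32 result tiled by [8, 16] -> one tile along y, two tiles along x.
assert workgroup_grid((8, 32), (8, 16)) == (1, 2)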
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%extracted_slice, %extracted_slice_0 : tensor<8x16xf32>, tensor<16x16xf32>) outs(%7 : tensor<8x16xf32>) -> tensor<8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%extracted_slice, %extracted_slice_0 : tensor<8x16xf32>, tensor<16x16xf32>) outs(%7 : tensor<8x16xf32>) -> tensor<8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} ins(%extracted_slice, %extracted_slice_0 : tensor<8x16xf32>, tensor<16x16xf32>) outs(%7 : tensor<8x16xf32>) -> tensor<8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = tensor.empty() : tensor<1x1x8x16xf32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %8 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%9 = tensor.empty() : tensor<1x1x16x16xf32>
%10 = tensor.empty() : tensor<1x1x16x16xf32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %10 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%11 = tensor.empty() : tensor<1x1x8x16xf32>
%pack_3 = tensor.pack %7 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %11 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%pack_3 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_4: f32, %out: f32):
%13 = arith.mulf %in, %in_4 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
%unpack = tensor.unpack %12 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
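// AMDAIEPackAndTranspose above rewrites the tiled matmul into packed 4-D operands: tensor.pack
// with inner_tiles [8, 16] turns the 8x16 A tile into a single 1x1x8x16 tile (and likewise the
// 16x16 B tile into 1x1x16x16), after which the matmul becomes the 6-D linalg.generic over the
// packed layout. For this single-tile case the pack is effectively a reshape; a numpy sketch of
// the layout (illustration only, assuming result[o0, o1, i0, i1] = src[o0*8 + i0, o1*16 + i1]):

import numpy as np

a = np.arange(8 * 16, dtype=np.float32).reshape(8, 16)

# tensor.pack %a inner_dims_pos = [0, 1] inner_tiles = [8, 16] : 8x16 -> 1x1x8x16
packed = a.reshape(1, 8, 1, 16).transpose(0, 2, 1, 3)

assert packed.shape == (1, 1, 8, 16)
assert np.array_equal(packed[0, 0], a)  # a single whole-tensor tile, data unchanged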
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = tensor.empty() : tensor<1x1x8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %9 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%10 = tensor.empty() : tensor<1x1x16x16xf32>
%11 = tensor.empty() : tensor<1x1x16x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%13 = tensor.empty() : tensor<1x1x8x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%14 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack_5 = tensor.pack %7 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %14 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_3 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%pack_5 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_6: f32, %out: f32):
%16 = arith.mulf %in, %in_6 : f32
%17 = arith.addf %out, %16 : f32
linalg.yield %17 : f32
} -> tensor<1x1x8x16xf32>
%unpack = tensor.unpack %15 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
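// NOTE: tile-and-fuse has wrapped the packed matmul in an inner scf.forall (1, 1); the untiled linalg.generic (%15) is now unused and is dropped by the cleanup shown next.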
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%8 = tensor.empty() : tensor<1x1x8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %9 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%10 = tensor.empty() : tensor<1x1x16x16xf32>
%11 = tensor.empty() : tensor<1x1x16x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %12 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%13 = tensor.empty() : tensor<1x1x8x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%14 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack_5 = tensor.pack %7 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %14 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_3 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%pack_5 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_6: f32, %out: f32):
%17 = arith.mulf %in, %in_6 : f32
%18 = arith.addf %out, %17 : f32
linalg.yield %18 : f32
} -> tensor<1x1x8x16xf32>
%16 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %pack_5) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_6 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_7 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_8 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_6, %extracted_slice_7 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%extracted_slice_8 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_9: f32, %out: f32):
%18 = arith.mulf %in, %in_9 : f32
%19 = arith.addf %out, %18 : f32
linalg.yield %19 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %16 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
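// NOTE: cleanup removed the dead generic and the unused tensor.empty ops; the accumulator fill (%11) now feeds the inner forall's shared_outs directly.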
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<8x16xf32>) -> tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %8 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %9 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%10 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %11) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%14 = arith.mulf %in, %in_8 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %12 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
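// NOTE: canonicalization folded away the redundant fill of the output slice; the unpack now writes straight into %extracted_slice_1.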
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%11 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %10) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%13 = arith.mulf %in, %in_8 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %12 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before AMDAIEFuseFillIntoForall (iree-amdaie-fuse-fill-into-forall) //----- //
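// NOTE: entry state for fill fusion: the accumulator zero-fill (%10) still sits outside the inner scf.forall.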
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%11 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %10) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%13 = arith.mulf %in, %in_8 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %12 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
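// NOTE: the zero-fill is now materialized per iteration inside the inner forall (%12); the original fill outside the loop (%10) is left unused.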
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%11 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%12 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%extracted_slice_8 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%13 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%12 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_9: f32, %out: f32):
%14 = arith.mulf %in, %in_9 : f32
%15 = arith.addf %out, %14 : f32
linalg.yield %15 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
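// NOTE: cleanup removed the now-unused outer fill; only the per-iteration fill inside the inner forall remains.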
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%11 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%13 = arith.mulf %in, %in_8 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %12 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
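// NOTE: this dump is identical to the previous one; canonicalization made no further changes here.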
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%11 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%13 = arith.mulf %in, %in_8 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %12 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- //
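// NOTE: CSE found nothing to eliminate; the IR is unchanged going into pack-and-transpose.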
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_5, %extracted_slice_6 : tensor<1x1x8x16xf32>, tensor<1x1x16x16xf32>) outs(%11 : tensor<1x1x8x16xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[8, 16], [1, 1], [0, 0, 1]]>} {
^bb0(%in: f32, %in_8: f32, %out: f32):
%13 = arith.mulf %in, %in_8 : f32
%14 = arith.addf %out, %13 : f32
linalg.yield %14 : f32
} -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %12 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
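// NOTE: pack-and-transpose has re-packed the operands of the inner matmul into 1x1x2x2x4x8 and 1x1x2x2x8x8 micro-tile layouts (outer_dims_perm = [0, 1, 3, 2]), still using tensor.empty destinations; the next pass assigns them allocations.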
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%13 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%pack_8 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %13 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%14 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%15 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%pack_9 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %15 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%16 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%17 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%pack_10 = tensor.pack %11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_8, %pack_9 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%pack_10 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_12: f32, %out: f32):
%19 = arith.mulf %in, %in_12 : f32
%20 = arith.addf %out, %19 : f32
linalg.yield %20 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_11 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_11 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
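// NOTE: bufferize-to-allocation has given the micro-tile packs dedicated memref.alloc buffers in memory space 2, exposed to the tensor IR via bufferization.to_tensor restrict writable.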
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%13 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %14 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%15 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%16 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%17 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %17 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%18 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%19 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%20 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_13 = tensor.pack %11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%21 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%pack_13 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_15: f32, %out: f32):
%22 = arith.mulf %in, %in_15 : f32
%23 = arith.addf %out, %22 : f32
linalg.yield %23 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_14 = tensor.unpack %21 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_14 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
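// NOTE: this dump appears identical to the previous one; this tile-and-fuse invocation made no visible change.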
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%12 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%13 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %14 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%15 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%16 = tensor.empty() : tensor<1x1x2x2x8x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%17 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %17 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%18 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%19 = tensor.empty() : tensor<1x1x2x2x4x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%20 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_13 = tensor.pack %11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%21 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%pack_13 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_15: f32, %out: f32):
%22 = arith.mulf %in, %in_15 : f32
%23 = arith.addf %out, %22 : f32
linalg.yield %23 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_14 = tensor.unpack %21 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_14 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
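// NOTE: cleanup dropped the leftover tensor.empty ops and the pack of the accumulator tile; the zero-fill now runs directly on the memory-space-2 packed accumulator (%15 = linalg.fill outs(%14)).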
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%11 = linalg.fill ins(%cst : f32) outs(%extracted_slice_7 : tensor<1x1x8x16xf32>) -> tensor<1x1x8x16xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%12 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %12 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%13 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %13 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%15 = linalg.fill ins(%cst : f32) outs(%14 : tensor<1x1x2x2x4x8xf32>) -> tensor<1x1x2x2x4x8xf32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%15 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%17 = arith.mulf %in, %in_14 : f32
%18 = arith.addf %out, %17 : f32
linalg.yield %18 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_13 = tensor.unpack %16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
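// Note: the canonicalizer runs between this dump and the next one; it folds away the linalg.fill (%11 above) whose only use was as the destination of the inner tensor.unpack, so in the dump below the unpack writes directly into %extracted_slice_7.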
// -----// IR Dump Before CSE (cse) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%11 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%12 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %12 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%13 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x1x2x2x4x8xf32>) -> tensor<1x1x2x2x4x8xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%14 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%16 = arith.mulf %in, %in_14 : f32
%17 = arith.addf %out, %16 : f32
linalg.yield %17 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_13 = tensor.unpack %15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
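// Note: cse runs between this dump and the next without finding anything to merge; the IR below is unchanged.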
// -----// IR Dump Before EliminateEmptyTensors (iree-eliminate-empty-tensors) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = tensor.empty() : tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%11 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%12 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %12 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%13 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x1x2x2x4x8xf32>) -> tensor<1x1x2x2x4x8xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%14 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%16 = arith.mulf %in, %in_14 : f32
%17 = arith.addf %out, %16 : f32
linalg.yield %17 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_13 = tensor.unpack %15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
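// Note: between this dump and the next, iree-eliminate-empty-tensors replaces the tensor.empty() destination (%5) with a flow.dispatch.tensor.load of the writeonly output binding %2, so that bufferization can later write the result in place.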
// -----// IR Dump Before EmptyTensorToAllocTensor (empty-tensor-to-alloc-tensor) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>> -> tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%11 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%12 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %12 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%13 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x1x2x2x4x8xf32>) -> tensor<1x1x2x2x4x8xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%14 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%16 = arith.mulf %in, %in_14 : f32
%17 = arith.addf %out, %16 : f32
linalg.yield %17 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_13 = tensor.unpack %15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
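// Note: empty-tensor-to-alloc-tensor has nothing to rewrite here since no tensor.empty remains, so the dump below is identical and feeds straight into comprehensive bufferization.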
// -----// IR Dump Before IREEComprehensiveBufferize (iree-codegen-iree-comprehensive-bufferize) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<8x16xf32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<16x32xf32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [8, 16], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<8x16xf32>> -> tensor<8x16xf32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [16, 32], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<16x32xf32>> -> tensor<16x32xf32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<8x32xf32>> -> tensor<8x32xf32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) shared_outs(%arg2 = %5) -> (tensor<8x32xf32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [8, 16] [1, 1] : tensor<8x16xf32> to tensor<8x16xf32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [16, 16] [1, 1] : tensor<16x32xf32> to tensor<16x16xf32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x32xf32> to tensor<8x16xf32>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x16xf32, 1 : i32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %7 : tensor<8x16xf32> -> tensor<1x1x8x16xf32>
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x1x16x16xf32, 1 : i32>
%pack_3 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %8 : tensor<16x16xf32> -> tensor<1x1x16x16xf32>
%alloc_4 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%9 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x1x8x16xf32, 1 : i32>
%10 = scf.forall (%arg3, %arg4) in (1, 1) shared_outs(%arg5 = %9) -> (tensor<1x1x8x16xf32>) {
%extracted_slice_5 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%extracted_slice_6 = tensor.extract_slice %pack_3[0, %arg4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : tensor<1x1x16x16xf32> to tensor<1x1x16x16xf32>
%extracted_slice_7 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> to tensor<1x1x8x16xf32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%11 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%pack_9 = tensor.pack %extracted_slice_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<1x1x8x16xf32> -> tensor<1x1x2x2x4x8xf32>
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%12 = bufferization.to_tensor %alloc_10 restrict writable : memref<1x1x2x2x8x8xf32, 2 : i32>
%pack_11 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %12 : tensor<1x1x16x16xf32> -> tensor<1x1x2x2x8x8xf32>
%alloc_12 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%13 = bufferization.to_tensor %alloc_12 restrict writable : memref<1x1x2x2x4x8xf32, 2 : i32>
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x1x2x2x4x8xf32>) -> tensor<1x1x2x2x4x8xf32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_9, %pack_11 : tensor<1x1x2x2x4x8xf32>, tensor<1x1x2x2x8x8xf32>) outs(%14 : tensor<1x1x2x2x4x8xf32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%16 = arith.mulf %in, %in_14 : f32
%17 = arith.addf %out, %16 : f32
linalg.yield %17 : f32
} -> tensor<1x1x2x2x4x8xf32>
%unpack_13 = tensor.unpack %15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x1x2x2x4x8xf32> -> tensor<1x1x8x16xf32>
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_13 into %arg5[%arg3, %arg4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : tensor<1x1x8x16xf32> into tensor<1x1x8x16xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
%unpack = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %extracted_slice_1 : tensor<1x1x8x16xf32> -> tensor<8x16xf32>
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x16xf32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [8, 16] [1, 1] : tensor<8x16xf32> into tensor<8x32xf32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [8, 32], strides = [1, 1] : tensor<8x32xf32> -> !flow.dispatch.tensor<writeonly:tensor<8x32xf32>>
return
}
}
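// Note: the dump below shows the IR after iree-codegen-iree-comprehensive-bufferize: the subspans now bind memrefs directly (with memref.assume_alignment), tensor.pack/unpack have become iree_linalg_ext.pack/unpack on memrefs, and bufferization has inserted copy linalg.generic ops between equivalent subviews/buffers that the following canonicalize and cse passes remove.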
// -----// IR Dump Before ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_5 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_8 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_9 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_10 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_8, %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_12: f32, %out: f32):
%3 = arith.mulf %in, %in_12 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_7 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_7 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) outs(%subview_11 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
%subview_4 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_1 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_4 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%2 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>) outs(%2 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
return
}
}
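// Note: resolve-shaped-type-result-dims is a no-op on this fully static IR; the function below is unchanged (the next dumps are printed at function scope rather than module scope).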
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_5 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_8 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_9 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_10 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_8, %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_12: f32, %out: f32):
%3 = arith.mulf %in, %in_12 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_7 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_7 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) outs(%subview_11 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
%subview_4 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_1 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_4 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%2 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>) outs(%2 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
return
}
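// Note: the canonicalizer runs between this dump and the next and erases the trailing linalg.generic that copied %2 onto itself (the bufferized remnant of the final flow.dispatch.tensor.store); the copies between duplicate subviews remain and are cleaned up by the cse and canonicalize steps that follow.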
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_5 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_8 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_6 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_9 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_10 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_8, %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_12: f32, %out: f32):
%3 = arith.mulf %in, %in_12 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_7 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_7 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) outs(%subview_11 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
%subview_4 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_1 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_4 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
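// Note: cse merges the duplicate memref.subview ops, so in the dump below the two remaining copy linalg.generic ops read and write the same subview (%subview_6 and %subview_1 respectively).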
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_6 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) outs(%subview_6 : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_1 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_1 : memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: f32, %out: f32):
linalg.yield %in : f32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
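// Note: with identical source and destination, the canonicalizer drops both self-copy linalg.generic ops; the dump below keeps only the pack, matmul-as-generic, and unpack structure.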
// -----// IR Dump Before CleanupBufferAllocView (iree-codegen-cleanup-buffer-alloc-view) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) attributes {translation_info = #iree_codegen.translation_info<None>} {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
}
}
// -----// IR Dump Before CSE (cse) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) attributes {translation_info = #iree_codegen.translation_info<None>} {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
}
}
// -----// IR Dump Before AMDAIELowerWorkgroupCount (iree-amdaie-lower-workgroup-count) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd"}>) {
hal.executable.export public @matmul_static_dispatch_0_matmul_8x32x16_f32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) attributes {translation_info = #iree_codegen.translation_info<None>} {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
}
}
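// Note: the three dumps above (before Canonicalizer, before CSE, and before AMDAIELowerWorkgroupCount) are textually identical, so neither canonicalization nor CSE changed this executable variant. From the next dump onward only the nested builtin.module is printed, presumably because the remaining codegen passes run on that inner module rather than on the hal.executable.variant wrapper.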
// -----// IR Dump Before EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<8x16xf32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<16x32xf32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<8x32xf32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32, #hal.descriptor_type<storage_buffer>> to memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32, #hal.descriptor_type<storage_buffer>> to memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
}
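// Note: the next dump (before AMDAIEBridgeToAIR) shows the effect of EraseHALDescriptorTypeFromMemRef: the #hal.descriptor_type<storage_buffer> annotations are stripped from the binding types, leaving plain memref<8x16xf32>, memref<16x32xf32>, and memref<8x32xf32>.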
// -----// IR Dump Before AMDAIEBridgeToAIR (iree-amdaie-bridge-to-air) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
scf.forall (%arg0, %arg1) = (0, 0) to (8, 32) step (8, 16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.forall (%arg2, %arg3) in (1, 1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
return
}
}
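// Note: comparing with the next dump (before FoldMemRefAliasOps), AMDAIEBridgeToAIR has rewritten both scf.forall loops (with their #gpu.block mappings) into scf.parallel loops terminated by scf.reduce, materializing the bounds and steps as arith.constant ops; the pack/unpack ops and the linalg.generic matmul body carry over unchanged.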
// -----// IR Dump Before FoldMemRefAliasOps (fold-memref-alias-ops) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
scf.parallel (%arg0, %arg1) = (%c0, %c0) to (%c8, %c32) step (%c8, %c16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%c1, %c1) step (%c1, %c1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.reduce
}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
scf.reduce
}
return
}
}
// -----// IR Dump Before AMDAIEDecomposeLinalgExtPackUnPackToAIR (iree-amdaie-decompose-pack-unpack-to-air) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
scf.parallel (%arg0, %arg1) = (%c0, %c0) to (%c8, %c32) step (%c8, %c16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
iree_linalg_ext.pack %subview inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %alloc : (memref<8x16xf32, strided<[16, 1], offset: ?>> memref<1x1x8x16xf32, 1 : i32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
iree_linalg_ext.pack %subview_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %alloc_2 : (memref<16x16xf32, strided<[32, 1], offset: ?>> memref<1x1x16x16xf32, 1 : i32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%c1, %c1) step (%c1, %c1) {
%subview_4 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_5 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_7 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x4x8xf32, 2 : i32>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> memref<1x1x2x2x8x8xf32, 2 : i32>)
%alloc_9 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_7, %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_10: f32, %out: f32):
%3 = arith.mulf %in, %in_10 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
iree_linalg_ext.unpack %alloc_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %subview_6 : (memref<1x1x2x2x4x8xf32, 2 : i32> memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>)
memref.dealloc %alloc_7 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_8 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_9 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.reduce
}
iree_linalg_ext.unpack %alloc_3 inner_dims_pos = [0, 1] inner_tiles = [8, 16] into %subview_1 : (memref<1x1x8x16xf32, 1 : i32> memref<8x16xf32, strided<[32, 1], offset: ?>>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
scf.reduce
}
return
}
}
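// Note: the next dump (before ParallelToHerd) shows the result of AMDAIEDecomposeLinalgExtPackUnPackToAIR: each iree_linalg_ext.pack/unpack has been rewritten as an air.dma_memcpy_nd copy, with memref.expand_shape and memref.transpose ops inserted where the pack also re-tiles the layout between the memory-space-1 and memory-space-2 buffers.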
// -----// IR Dump Before ParallelToHerd (air-par-to-herd) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
scf.parallel (%arg0, %arg1) = (%c0, %c0) to (%c8, %c32) step (%c8, %c16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc[] [] [], %subview[] [] []) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32, strided<[16, 1], offset: ?>>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc_2[] [] [], %subview_0[] [] []) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x16xf32, strided<[32, 1], offset: ?>>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%c1, %c1) step (%c1, %c1) {
%subview_5 = memref.subview %alloc[%arg2, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_6 = memref.subview %alloc_2[0, %arg3, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_8 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%expand_shape = memref.expand_shape %subview_5 [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_9 = memref.transpose %expand_shape (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_8[] [] [], %transpose_9[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>)
%alloc_10 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%expand_shape_11 = memref.expand_shape %subview_6 [[0], [1], [2, 3], [4, 5]] : memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_12 = memref.transpose %expand_shape_11 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_10[] [] [], %transpose_12[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>)
%alloc_13 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_8, %alloc_10 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_15: f32, %out: f32):
%3 = arith.mulf %in, %in_15 : f32
%4 = arith.addf %out, %3 : f32
linalg.yield %4 : f32
}
%transpose_14 = memref.transpose %alloc_13 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4, d2, d5) : memref<1x1x2x2x4x8xf32, 2 : i32> to memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>
air.dma_memcpy_nd (%subview_7[] [] [], %transpose_14[] [] []) : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>, memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>)
memref.dealloc %alloc_8 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_10 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>
scf.reduce
}
%subview_4 = memref.subview %alloc_3[0, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<8x16xf32, 1 : i32>
%transpose = memref.transpose %subview_4 (d0, d1) -> (d0, d1) : memref<8x16xf32, 1 : i32> to memref<8x16xf32, strided<[16, 1]>, 1 : i32>
air.dma_memcpy_nd (%subview_1[] [] [], %transpose[] [] []) : (memref<8x16xf32, strided<[32, 1], offset: ?>>, memref<8x16xf32, strided<[16, 1]>, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
scf.reduce
}
return
}
}
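// Note: in the next dump (before ParallelToLaunch), air-par-to-herd has mapped the inner 1x1 scf.parallel onto air.herd @herd_0; the former loop body becomes the herd body and the three memory-space-1 buffers are passed in as herd arguments.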
// -----// IR Dump Before ParallelToLaunch (air-par-to-launch) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
scf.parallel (%arg0, %arg1) = (%c0, %c0) to (%c8, %c32) step (%c8, %c16) {
%subview = memref.subview %0[%arg0, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_0 = memref.subview %1[0, %arg1] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_1 = memref.subview %2[%arg0, %arg1] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc[] [] [], %subview[] [] []) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32, strided<[16, 1], offset: ?>>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc_2[] [] [], %subview_0[] [] []) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x16xf32, strided<[32, 1], offset: ?>>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%c1_4 = arith.constant 1 : index
%c0_5 = arith.constant 0 : index
%c1_6 = arith.constant 1 : index
%c1_7 = arith.constant 1 : index
%c0_8 = arith.constant 0 : index
%c1_9 = arith.constant 1 : index
%c1_10 = arith.constant 1 : index
%c1_11 = arith.constant 1 : index
air.herd @herd_0 tile (%arg2, %arg3) in (%arg4=%c1_10, %arg5=%c1_11) args(%arg6=%alloc, %arg7=%alloc_2, %arg8=%alloc_3) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> {
%cst_13 = arith.constant 0.000000e+00 : f32
%3 = affine.apply affine_map<(d0) -> (d0)>(%arg2)
%4 = affine.apply affine_map<(d0) -> (d0)>(%arg3)
%subview_14 = memref.subview %arg6[%3, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_15 = memref.subview %arg7[0, %4, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_16 = memref.subview %arg8[%3, %4, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_17 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%expand_shape = memref.expand_shape %subview_14 [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_18 = memref.transpose %expand_shape (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_17[] [] [], %transpose_18[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>)
%alloc_19 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%expand_shape_20 = memref.expand_shape %subview_15 [[0], [1], [2, 3], [4, 5]] : memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_21 = memref.transpose %expand_shape_20 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_19[] [] [], %transpose_21[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>)
%alloc_22 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst_13 : f32) outs(%alloc_22 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_17, %alloc_19 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_22 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_24: f32, %out: f32):
%5 = arith.mulf %in, %in_24 : f32
%6 = arith.addf %out, %5 : f32
linalg.yield %6 : f32
}
%transpose_23 = memref.transpose %alloc_22 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4, d2, d5) : memref<1x1x2x2x4x8xf32, 2 : i32> to memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>
air.dma_memcpy_nd (%subview_16[] [] [], %transpose_23[] [] []) : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>, memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>)
memref.dealloc %alloc_17 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_19 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_22 : memref<1x1x2x2x4x8xf32, 2 : i32>
air.herd_terminator
}
%subview_12 = memref.subview %alloc_3[0, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<8x16xf32, 1 : i32>
%transpose = memref.transpose %subview_12 (d0, d1) -> (d0, d1) : memref<8x16xf32, 1 : i32> to memref<8x16xf32, strided<[16, 1]>, 1 : i32>
air.dma_memcpy_nd (%subview_1[] [] [], %transpose[] [] []) : (memref<8x16xf32, strided<[32, 1], offset: ?>>, memref<8x16xf32, strided<[16, 1]>, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
scf.reduce
}
return
}
}
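// Note: the next dump (before CopyToDma) shows air-par-to-launch wrapping the remaining outer scf.parallel into an air.launch over a 1x2 iteration space, with a nested air.segment @segment_0 that captures the launch indices and the three HAL buffer bindings as arguments.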
// -----// IR Dump Before CopyToDma (air-copy-to-dma) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c1 = arith.constant 1 : index
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
%c1_0 = arith.constant 1 : index
%c0_1 = arith.constant 0 : index
%c1_2 = arith.constant 1 : index
%c2 = arith.constant 2 : index
%c0_3 = arith.constant 0 : index
%c1_4 = arith.constant 1 : index
%c1_5 = arith.constant 1 : index
%c2_6 = arith.constant 2 : index
air.launch (%arg0, %arg1) in (%arg2=%c1_5, %arg3=%c2_6) args(%arg4=%0, %arg5=%1, %arg6=%2) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
air.segment @segment_0 args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg2, %arg10=%arg3, %arg11=%arg4, %arg12=%arg5, %arg13=%arg6) : index, index, index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
%3 = affine.apply affine_map<(d0) -> (d0 * 8)>(%arg7)
%4 = affine.apply affine_map<(d0) -> (d0 * 16)>(%arg8)
%subview = memref.subview %arg11[%3, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_7 = memref.subview %arg12[0, %4] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_8 = memref.subview %arg13[%3, %4] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc[] [] [], %subview[] [] []) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32, strided<[16, 1], offset: ?>>)
%alloc_9 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc_9[] [] [], %subview_7[] [] []) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x16xf32, strided<[32, 1], offset: ?>>)
%alloc_10 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%c1_11 = arith.constant 1 : index
%c0_12 = arith.constant 0 : index
%c1_13 = arith.constant 1 : index
%c1_14 = arith.constant 1 : index
%c0_15 = arith.constant 0 : index
%c1_16 = arith.constant 1 : index
%c1_17 = arith.constant 1 : index
%c1_18 = arith.constant 1 : index
air.herd @herd_0 tile (%arg14, %arg15) in (%arg16=%c1_17, %arg17=%c1_18) args(%arg18=%alloc, %arg19=%alloc_9, %arg20=%alloc_10) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> {
%cst_20 = arith.constant 0.000000e+00 : f32
%5 = affine.apply affine_map<(d0) -> (d0)>(%arg14)
%6 = affine.apply affine_map<(d0) -> (d0)>(%arg15)
%subview_21 = memref.subview %arg18[%5, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_22 = memref.subview %arg19[0, %6, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_23 = memref.subview %arg20[%5, %6, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_24 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%expand_shape = memref.expand_shape %subview_21 [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_25 = memref.transpose %expand_shape (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_24[] [] [], %transpose_25[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>)
%alloc_26 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%expand_shape_27 = memref.expand_shape %subview_22 [[0], [1], [2, 3], [4, 5]] : memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_28 = memref.transpose %expand_shape_27 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>
air.dma_memcpy_nd (%alloc_26[] [] [], %transpose_28[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>)
%alloc_29 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst_20 : f32) outs(%alloc_29 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_24, %alloc_26 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_29 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_31: f32, %out: f32):
%7 = arith.mulf %in, %in_31 : f32
%8 = arith.addf %out, %7 : f32
linalg.yield %8 : f32
}
%transpose_30 = memref.transpose %alloc_29 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4, d2, d5) : memref<1x1x2x2x4x8xf32, 2 : i32> to memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>
air.dma_memcpy_nd (%subview_23[] [] [], %transpose_30[] [] []) : (memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>, memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>)
memref.dealloc %alloc_24 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_26 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_29 : memref<1x1x2x2x4x8xf32, 2 : i32>
air.herd_terminator
}
%subview_19 = memref.subview %alloc_10[0, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<8x16xf32, 1 : i32>
%transpose = memref.transpose %subview_19 (d0, d1) -> (d0, d1) : memref<8x16xf32, 1 : i32> to memref<8x16xf32, strided<[16, 1]>, 1 : i32>
air.dma_memcpy_nd (%subview_8[] [] [], %transpose[] [] []) : (memref<8x16xf32, strided<[32, 1], offset: ?>>, memref<8x16xf32, strided<[16, 1]>, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_9 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_10 : memref<1x1x8x16xf32, 1 : i32>
air.segment_terminator
}
air.launch_terminator
}
return
}
}
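// Note: in the next dump (before Canonicalizer), the memref.subview sources of the outer air.dma_memcpy_nd ops appear to have been folded away, so those DMAs now take explicit offset/size/stride operand lists on the original buffers (e.g. %arg11[%3, %c0_3] [%c8, %c16_5] [%c16, %c1_4]).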
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
air.launch (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%0, %arg5=%1, %arg6=%2) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
air.segment @segment_0 args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg2, %arg10=%arg3, %arg11=%arg4, %arg12=%arg5, %arg13=%arg6) : index, index, index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
%c1_0 = arith.constant 1 : index
%3 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
%4 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
%subview = memref.subview %arg11[%3, 0] [8, 16] [1, 1] : memref<8x16xf32> to memref<8x16xf32, strided<[16, 1], offset: ?>>
%subview_1 = memref.subview %arg12[0, %4] [16, 16] [1, 1] : memref<16x32xf32> to memref<16x16xf32, strided<[32, 1], offset: ?>>
%subview_2 = memref.subview %arg13[%3, %4] [8, 16] [1, 1] : memref<8x32xf32> to memref<8x16xf32, strided<[32, 1], offset: ?>>
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
%c0_3 = arith.constant 0 : index
%c16 = arith.constant 16 : index
%c1_4 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c16_5 = arith.constant 16 : index
air.dma_memcpy_nd (%alloc[] [] [], %arg11[%3, %c0_3] [%c8, %c16_5] [%c16, %c1_4]) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%alloc_6 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
%c0_7 = arith.constant 0 : index
%c32 = arith.constant 32 : index
%c1_8 = arith.constant 1 : index
%c16_9 = arith.constant 16 : index
%c16_10 = arith.constant 16 : index
air.dma_memcpy_nd (%alloc_6[] [] [], %arg12[%c0_7, %4] [%c16_9, %c16_10] [%c32, %c1_8]) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%alloc_11 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.herd @herd_0 tile (%arg14, %arg15) in (%arg16=%c1_0, %arg17=%c1_0) args(%arg18=%alloc, %arg19=%alloc_6, %arg20=%alloc_11) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> {
%cst = arith.constant 0.000000e+00 : f32
%subview_28 = memref.subview %arg18[%arg14, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%subview_29 = memref.subview %arg19[0, %arg15, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xf32, 1 : i32> to memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32>
%subview_30 = memref.subview %arg20[%arg14, %arg15, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32>
%alloc_31 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
%expand_shape = memref.expand_shape %subview_28 [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xf32, strided<[128, 128, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_32 = memref.transpose %expand_shape (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xf32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x4x8xf32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1 : i32>
%c0_33 = arith.constant 0 : index
%c0_34 = arith.constant 0 : index
%c0_35 = arith.constant 0 : index
%c128_36 = arith.constant 128 : index
%c128_37 = arith.constant 128 : index
%c8_38 = arith.constant 8 : index
%c64 = arith.constant 64 : index
%c16_39 = arith.constant 16 : index
%c1_40 = arith.constant 1 : index
%c1_41 = arith.constant 1 : index
%c1_42 = arith.constant 1 : index
%c2_43 = arith.constant 2 : index
%c2_44 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8_45 = arith.constant 8 : index
air.dma_memcpy_nd (%alloc_31[] [] [], %arg18[%arg14, %c0_33, %c0_34, %c0_35] [%c2_43, %c2_44, %c4, %c8_45] [%c8_38, %c64, %c16_39, %c1_40]) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%alloc_46 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
%expand_shape_47 = memref.expand_shape %subview_29 [[0], [1], [2, 3], [4, 5]] : memref<1x1x16x16xf32, strided<[256, 256, 16, 1], offset: ?>, 1 : i32> into memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32>
%transpose_48 = memref.transpose %expand_shape_47 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x8x2x8xf32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1 : i32> to memref<1x1x2x2x8x8xf32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1 : i32>
%c0_49 = arith.constant 0 : index
%c0_50 = arith.constant 0 : index
%c0_51 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c256_52 = arith.constant 256 : index
%c8_53 = arith.constant 8 : index
%c128_54 = arith.constant 128 : index
%c16_55 = arith.constant 16 : index
%c1_56 = arith.constant 1 : index
%c1_57 = arith.constant 1 : index
%c1_58 = arith.constant 1 : index
%c2_59 = arith.constant 2 : index
%c2_60 = arith.constant 2 : index
%c8_61 = arith.constant 8 : index
%c8_62 = arith.constant 8 : index
air.dma_memcpy_nd (%alloc_46[] [] [], %arg19[%c0_49, %arg15, %c0_50, %c0_51] [%c2_59, %c2_60, %c8_61, %c8_62] [%c8_53, %c128_54, %c16_55, %c1_56]) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%alloc_63 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_63 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_31, %alloc_46 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_63 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_93: f32, %out: f32):
%5 = arith.mulf %in, %in_93 : f32
%6 = arith.addf %out, %5 : f32
linalg.yield %6 : f32
}
%transpose_64 = memref.transpose %alloc_63 (d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4, d2, d5) : memref<1x1x2x2x4x8xf32, 2 : i32> to memref<1x1x2x4x2x8xf32, strided<[128, 128, 32, 8, 64, 1]>, 2 : i32>
%c0_65 = arith.constant 0 : index
%c0_66 = arith.constant 0 : index
%c128_67 = arith.constant 128 : index
%c128_68 = arith.constant 128 : index
%c32_69 = arith.constant 32 : index
%c8_70 = arith.constant 8 : index
%c64_71 = arith.constant 64 : index
%c1_72 = arith.constant 1 : index
%c1_73 = arith.constant 1 : index
%c1_74 = arith.constant 1 : index
%c2_75 = arith.constant 2 : index
%c4_76 = arith.constant 4 : index
%c2_77 = arith.constant 2 : index
%c8_78 = arith.constant 8 : index
%c128_79 = arith.constant 128 : index
%c128_80 = arith.constant 128 : index
%c16_81 = arith.constant 16 : index
%c1_82 = arith.constant 1 : index
%c1_83 = arith.constant 1 : index
%c1_84 = arith.constant 1 : index
%c8_85 = arith.constant 8 : index
%c16_86 = arith.constant 16 : index
%c0_87 = arith.constant 0 : index
%c0_88 = arith.constant 0 : index
%c0_89 = arith.constant 0 : index
%c0_90 = arith.constant 0 : index
%c0_91 = arith.constant 0 : index
%c0_92 = arith.constant 0 : index
air.dma_memcpy_nd (%arg20[%arg14, %arg15, %c0_65, %c0_66] [%c1_83, %c1_84, %c8_85, %c16_86] [%c128_79, %c128_80, %c16_81, %c1_82], %alloc_63[%c0_92, %c0_91, %c0_90, %c0_89, %c0_88, %c0_87] [%c1_73, %c1_74, %c2_75, %c4_76, %c2_77, %c8_78] [%c128_67, %c128_68, %c32_69, %c8_70, %c64_71, %c1_72]) : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
memref.dealloc %alloc_31 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_46 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_63 : memref<1x1x2x2x4x8xf32, 2 : i32>
air.herd_terminator
}
%subview_12 = memref.subview %alloc_11[0, 0, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xf32, 1 : i32> to memref<8x16xf32, 1 : i32>
%transpose = memref.transpose %subview_12 (d0, d1) -> (d0, d1) : memref<8x16xf32, 1 : i32> to memref<8x16xf32, strided<[16, 1]>, 1 : i32>
%c0_13 = arith.constant 0 : index
%c0_14 = arith.constant 0 : index
%c0_15 = arith.constant 0 : index
%c0_16 = arith.constant 0 : index
%c16_17 = arith.constant 16 : index
%c1_18 = arith.constant 1 : index
%c8_19 = arith.constant 8 : index
%c16_20 = arith.constant 16 : index
%c32_21 = arith.constant 32 : index
%c1_22 = arith.constant 1 : index
%c8_23 = arith.constant 8 : index
%c16_24 = arith.constant 16 : index
%c1_25 = arith.constant 1 : index
%c1_26 = arith.constant 1 : index
%c128 = arith.constant 128 : index
%c128_27 = arith.constant 128 : index
air.dma_memcpy_nd (%arg13[%3, %4] [%c8_23, %c16_24] [%c32_21, %c1_22], %alloc_11[%c0_13, %c0_14, %c0_15, %c0_16] [%c1_26, %c1_25, %c8_19, %c16_20] [%c128_27, %c128, %c16_17, %c1_18]) : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_6 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_11 : memref<1x1x8x16xf32, 1 : i32>
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
air.launch (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%0, %arg5=%1, %arg6=%2) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
air.segment @segment_0 args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_0 = arith.constant 0 : index
%c1_1 = arith.constant 1 : index
%3 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
%4 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc[] [] [], %arg9[%3, %c0_0] [%c8, %c16] [%c16, %c1_1]) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc_2[] [] [], %arg10[%c0_0, %4] [%c16, %c16] [%c32, %c1_1]) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.herd @herd_0 tile (%arg12, %arg13) in (%arg14=%c1_1, %arg15=%c1_1) args(%arg16=%alloc, %arg17=%alloc_2, %arg18=%alloc_3) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> {
%c32_4 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_5 = arith.constant 2 : index
%c1_6 = arith.constant 1 : index
%c16_7 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_8 = arith.constant 8 : index
%c128_9 = arith.constant 128 : index
%c0_10 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%alloc_11 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.dma_memcpy_nd (%alloc_11[] [] [], %arg16[%arg12, %c0_10, %c0_10, %c0_10] [%c2_5, %c2_5, %c4, %c8_8] [%c8_8, %c64, %c16_7, %c1_6]) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%alloc_12 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.dma_memcpy_nd (%alloc_12[] [] [], %arg17[%c0_10, %arg13, %c0_10, %c0_10] [%c2_5, %c2_5, %c8_8, %c8_8] [%c8_8, %c128_9, %c16_7, %c1_6]) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%alloc_13 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_11, %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%5 = arith.mulf %in, %in_14 : f32
%6 = arith.addf %out, %5 : f32
linalg.yield %6 : f32
}
air.dma_memcpy_nd (%arg18[%arg12, %arg13, %c0_10, %c0_10] [%c1_6, %c1_6, %c8_8, %c16_7] [%c128_9, %c128_9, %c16_7, %c1_6], %alloc_13[%c0_10, %c0_10, %c0_10, %c0_10, %c0_10, %c0_10] [%c1_6, %c1_6, %c2_5, %c4, %c2_5, %c8_8] [%c128_9, %c128_9, %c32_4, %c8_8, %c64, %c1_6]) : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
memref.dealloc %alloc_11 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>
air.herd_terminator
}
air.dma_memcpy_nd (%arg11[%3, %4] [%c8, %c16] [%c32, %c1_1], %alloc_3[%c0_0, %c0_0, %c0_0, %c0_0] [%c1_1, %c1_1, %c8, %c16] [%c128, %c128, %c16, %c1_1]) : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRDependency (air-dependency) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
memref.assume_alignment %0, 64 : memref<8x16xf32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
memref.assume_alignment %1, 64 : memref<16x32xf32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
memref.assume_alignment %2, 64 : memref<8x32xf32>
air.launch (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%0, %arg5=%1, %arg6=%2) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
air.segment @segment_0 args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> {
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_0 = arith.constant 0 : index
%c1_1 = arith.constant 1 : index
%3 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
%4 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc[] [] [], %arg9[%3, %c0_0] [%c8, %c16] [%c16, %c1_1]) : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%alloc_2 = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.dma_memcpy_nd (%alloc_2[] [] [], %arg10[%c0_0, %4] [%c16, %c16] [%c32, %c1_1]) : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%alloc_3 = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.herd @herd_0 tile (%arg12, %arg13) in (%arg14=%c1_1, %arg15=%c1_1) args(%arg16=%alloc, %arg17=%alloc_2, %arg18=%alloc_3) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> {
%c32_4 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_5 = arith.constant 2 : index
%c1_6 = arith.constant 1 : index
%c16_7 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_8 = arith.constant 8 : index
%c128_9 = arith.constant 128 : index
%c0_10 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%alloc_11 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.dma_memcpy_nd (%alloc_11[] [] [], %arg16[%arg12, %c0_10, %c0_10, %c0_10] [%c2_5, %c2_5, %c4, %c8_8] [%c8_8, %c64, %c16_7, %c1_6]) : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%alloc_12 = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.dma_memcpy_nd (%alloc_12[] [] [], %arg17[%c0_10, %arg13, %c0_10, %c0_10] [%c2_5, %c2_5, %c8_8, %c8_8] [%c8_8, %c128_9, %c16_7, %c1_6]) : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%alloc_13 = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
linalg.fill ins(%cst : f32) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%alloc_11, %alloc_12 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_14: f32, %out: f32):
%5 = arith.mulf %in, %in_14 : f32
%6 = arith.addf %out, %5 : f32
linalg.yield %6 : f32
}
air.dma_memcpy_nd (%arg18[%arg12, %arg13, %c0_10, %c0_10] [%c1_6, %c1_6, %c8_8, %c16_7] [%c128_9, %c128_9, %c16_7, %c1_6], %alloc_13[%c0_10, %c0_10, %c0_10, %c0_10, %c0_10, %c0_10] [%c1_6, %c1_6, %c2_5, %c4, %c2_5, %c8_8] [%c128_9, %c128_9, %c32_4, %c8_8, %c64, %c1_6]) : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
memref.dealloc %alloc_11 : memref<1x1x2x2x4x8xf32, 2 : i32>
memref.dealloc %alloc_12 : memref<1x1x2x2x8x8xf32, 2 : i32>
memref.dealloc %alloc_13 : memref<1x1x2x2x4x8xf32, 2 : i32>
air.herd_terminator
}
air.dma_memcpy_nd (%arg11[%3, %4] [%c8, %c16] [%c32, %c1_1], %alloc_3[%c0_0, %c0_0, %c0_0, %c0_0] [%c1_1, %c1_1, %c8, %c16] [%c128, %c128, %c16, %c1_1]) : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
memref.dealloc %alloc : memref<1x1x8x16xf32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x1x16x16xf32, 1 : i32>
memref.dealloc %alloc_3 : memref<1x1x8x16xf32, 1 : i32>
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRDependencyScheduleOpt (air-dependency-schedule-opt) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%1 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 2 : i32} {
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %6 : index
} {id = 7 : i32}
%async_token_11, %results_12 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %6 : index
} {id = 8 : i32}
%async_token_13, %results_14 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%2 = air.dma_memcpy_nd async [%async_token_9, %async_token_13] (%results_14[] [] [], %arg9[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%async_token_15, %results_16 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%3 = air.dma_memcpy_nd async [%async_token_11, %async_token_15] (%results_16[] [] [], %arg10[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%async_token_17, %results_18 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%4 = air.herd @herd_0 async [%2, %3, %async_token_17] tile (%arg12, %arg13) in (%arg14=%c1_8, %arg15=%c1_8) args(%arg16=%results_14, %arg17=%results_16, %arg18=%results_18) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> attributes {id = 1 : i32} {
%c32_22 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_23 = arith.constant 2 : index
%c1_24 = arith.constant 1 : index
%c16_25 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_26 = arith.constant 8 : index
%c128_27 = arith.constant 128 : index
%c0_28 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_29, %results_30 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%6 = air.dma_memcpy_nd async [%async_token_29] (%results_30[] [] [], %arg16[%arg12, %c0_28, %c0_28, %c0_28] [%c2_23, %c2_23, %c4, %c8_26] [%c8_26, %c64, %c16_25, %c1_24]) {id = 3 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_31, %results_32 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%7 = air.dma_memcpy_nd async [%async_token_31] (%results_32[] [] [], %arg17[%c0_28, %arg13, %c0_28, %c0_28] [%c2_23, %c2_23, %c8_26, %c8_26] [%c8_26, %c128_27, %c16_25, %c1_24]) {id = 4 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%async_token_33, %results_34 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_35 = air.execute [%async_token_33] {
linalg.fill ins(%cst : f32) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_36 = air.execute [%async_token_35, %7, %6] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_30, %results_32 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_40: f32, %out: f32):
%9 = arith.mulf %in, %in_40 : f32
%10 = arith.addf %out, %9 : f32
linalg.yield %10 : f32
}
} {id = 16 : i32}
%8 = air.dma_memcpy_nd async [%async_token_36] (%arg18[%arg12, %arg13, %c0_28, %c0_28] [%c1_24, %c1_24, %c8_26, %c16_25] [%c128_27, %c128_27, %c16_25, %c1_24], %results_34[%c0_28, %c0_28, %c0_28, %c0_28, %c0_28, %c0_28] [%c1_24, %c1_24, %c2_23, %c4, %c2_23, %c8_26] [%c128_27, %c128_27, %c32_22, %c8_26, %c64, %c1_24]) {id = 5 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37 = air.execute [%async_token_36] {
memref.dealloc %results_30 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_38 = air.execute [%async_token_36] {
memref.dealloc %results_32 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_39 = air.execute [%8] {
memref.dealloc %results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%5 = air.dma_memcpy_nd async [%4] (%arg11[%results_10, %results_12] [%c8, %c16] [%c32, %c1_8], %results_18[%c0_7, %c0_7, %c0_7, %c0_7] [%c1_8, %c1_8, %c8, %c16] [%c128, %c128, %c16, %c1_8]) {id = 6 : i32} : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_19 = air.execute [%4] {
memref.dealloc %results_14 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_20 = air.execute [%4] {
memref.dealloc %results_16 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_21 = air.execute [%5] {
memref.dealloc %results_18 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRSpecializeDmaBroadcast (air-specialize-dma-broadcast) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%1 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 2 : i32} {
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %6 : index
} {id = 7 : i32}
%async_token_11, %results_12 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %6 : index
} {id = 8 : i32}
%async_token_13, %results_14 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%2 = air.dma_memcpy_nd async [%async_token_9, %async_token_13] (%results_14[] [] [], %arg9[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%async_token_15, %results_16 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%3 = air.dma_memcpy_nd async [%async_token_11, %async_token_15] (%results_16[] [] [], %arg10[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%async_token_17, %results_18 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%4 = air.herd @herd_0 async [%2, %3, %async_token_17] tile (%arg12, %arg13) in (%arg14=%c1_8, %arg15=%c1_8) args(%arg16=%results_14, %arg17=%results_16, %arg18=%results_18) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> attributes {id = 1 : i32} {
%c32_22 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_23 = arith.constant 2 : index
%c1_24 = arith.constant 1 : index
%c16_25 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_26 = arith.constant 8 : index
%c128_27 = arith.constant 128 : index
%c0_28 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_29, %results_30 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%6 = air.dma_memcpy_nd async [%async_token_29] (%results_30[] [] [], %arg16[%arg12, %c0_28, %c0_28, %c0_28] [%c2_23, %c2_23, %c4, %c8_26] [%c8_26, %c64, %c16_25, %c1_24]) {id = 3 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_31, %results_32 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%7 = air.dma_memcpy_nd async [%async_token_31] (%results_32[] [] [], %arg17[%c0_28, %arg13, %c0_28, %c0_28] [%c2_23, %c2_23, %c8_26, %c8_26] [%c8_26, %c128_27, %c16_25, %c1_24]) {id = 4 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%async_token_33, %results_34 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_35 = air.execute [%async_token_33] {
linalg.fill ins(%cst : f32) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_36 = air.execute [%async_token_35, %7, %6] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_30, %results_32 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_40: f32, %out: f32):
%9 = arith.mulf %in, %in_40 : f32
%10 = arith.addf %out, %9 : f32
linalg.yield %10 : f32
}
} {id = 16 : i32}
%8 = air.dma_memcpy_nd async [%async_token_36] (%arg18[%arg12, %arg13, %c0_28, %c0_28] [%c1_24, %c1_24, %c8_26, %c16_25] [%c128_27, %c128_27, %c16_25, %c1_24], %results_34[%c0_28, %c0_28, %c0_28, %c0_28, %c0_28, %c0_28] [%c1_24, %c1_24, %c2_23, %c4, %c2_23, %c8_26] [%c128_27, %c128_27, %c32_22, %c8_26, %c64, %c1_24]) {id = 5 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37 = air.execute [%async_token_36] {
memref.dealloc %results_30 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_38 = air.execute [%async_token_36] {
memref.dealloc %results_32 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_39 = air.execute [%8] {
memref.dealloc %results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%5 = air.dma_memcpy_nd async [%4] (%arg11[%results_10, %results_12] [%c8, %c16] [%c32, %c1_8], %results_18[%c0_7, %c0_7, %c0_7, %c0_7] [%c1_8, %c1_8, %c8, %c16] [%c128, %c128, %c16, %c1_8]) {id = 6 : i32} : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_19 = air.execute [%4] {
memref.dealloc %results_14 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_20 = air.execute [%4] {
memref.dealloc %results_16 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_21 = air.execute [%5] {
memref.dealloc %results_18 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before DmaToChannel (air-dma-to-channel) //----- //
module {
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%1 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 2 : i32} {
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %6 : index
} {id = 7 : i32}
%async_token_11, %results_12 = air.execute -> (index) {
%6 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %6 : index
} {id = 8 : i32}
%async_token_13, %results_14 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%2 = air.dma_memcpy_nd async [%async_token_9, %async_token_13] (%results_14[] [] [], %arg9[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<8x16xf32>)
%async_token_15, %results_16 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%3 = air.dma_memcpy_nd async [%async_token_11, %async_token_15] (%results_16[] [] [], %arg10[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<1x1x16x16xf32, 1 : i32>, memref<16x32xf32>)
%async_token_17, %results_18 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%4 = air.herd @herd_0 async [%2, %3, %async_token_17] tile (%arg12, %arg13) in (%arg14=%c1_8, %arg15=%c1_8) args(%arg16=%results_14, %arg17=%results_16, %arg18=%results_18) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> attributes {id = 1 : i32} {
%c32_22 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_23 = arith.constant 2 : index
%c1_24 = arith.constant 1 : index
%c16_25 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_26 = arith.constant 8 : index
%c128_27 = arith.constant 128 : index
%c0_28 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_29, %results_30 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%6 = air.dma_memcpy_nd async [%async_token_29] (%results_30[] [] [], %arg16[%arg12, %c0_28, %c0_28, %c0_28] [%c2_23, %c2_23, %c4, %c8_26] [%c8_26, %c64, %c16_25, %c1_24]) {id = 3 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_31, %results_32 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%7 = air.dma_memcpy_nd async [%async_token_31] (%results_32[] [] [], %arg17[%c0_28, %arg13, %c0_28, %c0_28] [%c2_23, %c2_23, %c8_26, %c8_26] [%c8_26, %c128_27, %c16_25, %c1_24]) {id = 4 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>, memref<1x1x16x16xf32, 1 : i32>)
%async_token_33, %results_34 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_35 = air.execute [%async_token_33] {
linalg.fill ins(%cst : f32) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_36 = air.execute [%async_token_35, %7, %6] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_30, %results_32 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_40: f32, %out: f32):
%9 = arith.mulf %in, %in_40 : f32
%10 = arith.addf %out, %9 : f32
linalg.yield %10 : f32
}
} {id = 16 : i32}
%8 = air.dma_memcpy_nd async [%async_token_36] (%arg18[%arg12, %arg13, %c0_28, %c0_28] [%c1_24, %c1_24, %c8_26, %c16_25] [%c128_27, %c128_27, %c16_25, %c1_24], %results_34[%c0_28, %c0_28, %c0_28, %c0_28, %c0_28, %c0_28] [%c1_24, %c1_24, %c2_23, %c4, %c2_23, %c8_26] [%c128_27, %c128_27, %c32_22, %c8_26, %c64, %c1_24]) {id = 5 : i32} : (memref<1x1x8x16xf32, 1 : i32>, memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37 = air.execute [%async_token_36] {
memref.dealloc %results_30 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_38 = air.execute [%async_token_36] {
memref.dealloc %results_32 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_39 = air.execute [%8] {
memref.dealloc %results_34 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%5 = air.dma_memcpy_nd async [%4] (%arg11[%results_10, %results_12] [%c8, %c16] [%c32, %c1_8], %results_18[%c0_7, %c0_7, %c0_7, %c0_7] [%c1_8, %c1_8, %c8, %c16] [%c128, %c128, %c16, %c1_8]) {id = 6 : i32} : (memref<8x32xf32>, memref<1x1x8x16xf32, 1 : i32>)
%async_token_19 = air.execute [%4] {
memref.dealloc %results_14 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_20 = air.execute [%4] {
memref.dealloc %results_16 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_21 = air.execute [%5] {
memref.dealloc %results_18 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) : (memref<8x16xf32>)
%c32 = arith.constant 32 : index
%c16_11 = arith.constant 16 : index
%c0_12 = arith.constant 0 : index
%c1_13 = arith.constant 1 : index
%async_token_14, %results_15 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%2 = air.channel.put async [%async_token_14] @channel_1[] (%arg5[%c0_12, %results_15] [%c16_11, %c16_11] [%c32, %c1_13]) : (memref<16x32xf32>)
%c32_16 = arith.constant 32 : index
%c8_17 = arith.constant 8 : index
%c16_18 = arith.constant 16 : index
%c1_19 = arith.constant 1 : index
%async_token_20, %results_21 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%async_token_22, %results_23 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%3 = air.channel.get async [%async_token_20, %async_token_22] @channel_5[] (%arg6[%results_21, %results_23] [%c8_17, %c16_18] [%c32_16, %c1_19]) : (memref<8x32xf32>)
%4 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1, %arg9=%arg4, %arg10=%arg5, %arg11=%arg6) : index, index, memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 2 : i32} {
%c128 = arith.constant 128 : index
%c32_24 = arith.constant 32 : index
%c8_25 = arith.constant 8 : index
%c16_26 = arith.constant 16 : index
%c0_27 = arith.constant 0 : index
%c1_28 = arith.constant 1 : index
%async_token_29, %results_30 = air.execute -> (index) {
%15 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %15 : index
} {id = 7 : i32}
%async_token_31, %results_32 = air.execute -> (index) {
%15 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %15 : index
} {id = 8 : i32}
%async_token_33, %results_34 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%5 = air.channel.get async [%async_token_29, %async_token_33] @channel_0[] (%results_34[] [] []) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_35, %results_36 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%6 = air.channel.get async [%async_token_31, %async_token_35] @channel_1[] (%results_36[] [] []) : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_37, %results_38 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%c1_39 = arith.constant 1 : index
%c0_40 = arith.constant 0 : index
%c0_41 = arith.constant 0 : index
%c1_42 = arith.constant 1 : index
%c1_43 = arith.constant 1 : index
%7 = air.wait_all async [%async_token_33, %5]
%8 = scf.parallel (%arg12, %arg13) = (%c0_40, %c0_41) to (%c1_42, %c1_43) step (%c1_39, %c1_39) init (%7) -> !air.async.token {
%c4 = arith.constant 4 : index
%c2_57 = arith.constant 2 : index
%c1_58 = arith.constant 1 : index
%c16_59 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_60 = arith.constant 8 : index
%c0_61 = arith.constant 0 : index
%15 = air.channel.put async [%async_token_33, %7] @channel_2[%arg12, %arg13] (%results_34[%arg12, %c0_61, %c0_61, %c0_61] [%c2_57, %c2_57, %c4, %c8_60] [%c8_60, %c64, %c16_59, %c1_58]) : (memref<1x1x8x16xf32, 1 : i32>)
%16 = air.wait_all async [%15]
scf.reduce(%16 : !air.async.token) {
^bb0(%arg14: !air.async.token, %arg15: !air.async.token):
%17 = air.wait_all async [%arg14, %arg15]
scf.reduce.return %17 : !air.async.token
}
}
%c1_44 = arith.constant 1 : index
%c0_45 = arith.constant 0 : index
%c0_46 = arith.constant 0 : index
%c1_47 = arith.constant 1 : index
%c1_48 = arith.constant 1 : index
%9 = air.wait_all async [%async_token_35, %6]
%10 = scf.parallel (%arg12, %arg13) = (%c0_45, %c0_46) to (%c1_47, %c1_48) step (%c1_44, %c1_44) init (%9) -> !air.async.token {
%c2_57 = arith.constant 2 : index
%c1_58 = arith.constant 1 : index
%c16_59 = arith.constant 16 : index
%c8_60 = arith.constant 8 : index
%c128_61 = arith.constant 128 : index
%c0_62 = arith.constant 0 : index
%15 = air.channel.put async [%async_token_35, %9] @channel_3[%arg12, %arg13] (%results_36[%c0_62, %arg13, %c0_62, %c0_62] [%c2_57, %c2_57, %c8_60, %c8_60] [%c8_60, %c128_61, %c16_59, %c1_58]) : (memref<1x1x16x16xf32, 1 : i32>)
%16 = air.wait_all async [%15]
scf.reduce(%16 : !air.async.token) {
^bb0(%arg14: !air.async.token, %arg15: !air.async.token):
%17 = air.wait_all async [%arg14, %arg15]
scf.reduce.return %17 : !air.async.token
}
}
%c1_49 = arith.constant 1 : index
%c0_50 = arith.constant 0 : index
%c0_51 = arith.constant 0 : index
%c1_52 = arith.constant 1 : index
%c1_53 = arith.constant 1 : index
%11 = air.wait_all async [%async_token_37]
%12 = scf.parallel (%arg12, %arg13) = (%c0_50, %c0_51) to (%c1_52, %c1_53) step (%c1_49, %c1_49) init (%11) -> !air.async.token {
%c1_57 = arith.constant 1 : index
%c16_58 = arith.constant 16 : index
%c8_59 = arith.constant 8 : index
%c128_60 = arith.constant 128 : index
%c0_61 = arith.constant 0 : index
%15 = air.channel.get async [%async_token_37, %11] @channel_4[%arg12, %arg13] (%results_38[%arg12, %arg13, %c0_61, %c0_61] [%c1_57, %c1_57, %c8_59, %c16_58] [%c128_60, %c128_60, %c16_58, %c1_57]) : (memref<1x1x8x16xf32, 1 : i32>)
%16 = air.wait_all async [%15]
scf.reduce(%16 : !air.async.token) {
^bb0(%arg14: !air.async.token, %arg15: !air.async.token):
%17 = air.wait_all async [%arg14, %arg15]
scf.reduce.return %17 : !air.async.token
}
}
%13 = air.herd @herd_0 async [%5, %6, %async_token_37] tile (%arg12, %arg13) in (%arg14=%c1_28, %arg15=%c1_28) args(%arg16=%results_34, %arg17=%results_36, %arg18=%results_38) : memref<1x1x8x16xf32, 1 : i32>, memref<1x1x16x16xf32, 1 : i32>, memref<1x1x8x16xf32, 1 : i32> attributes {id = 1 : i32} {
%c32_57 = arith.constant 32 : index
%c4 = arith.constant 4 : index
%c2_58 = arith.constant 2 : index
%c1_59 = arith.constant 1 : index
%c16_60 = arith.constant 16 : index
%c64 = arith.constant 64 : index
%c8_61 = arith.constant 8 : index
%c128_62 = arith.constant 128 : index
%c0_63 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_64, %results_65 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%15 = air.channel.get async [%async_token_64] @channel_2[%arg12, %arg13] (%results_65[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_66, %results_67 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%16 = air.channel.get async [%async_token_66] @channel_3[%arg12, %arg13] (%results_67[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_68, %results_69 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_70 = air.execute [%async_token_68] {
linalg.fill ins(%cst : f32) outs(%results_69 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_71 = air.execute [%async_token_70, %16, %15] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_65, %results_67 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_69 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_75: f32, %out: f32):
%18 = arith.mulf %in, %in_75 : f32
%19 = arith.addf %out, %18 : f32
linalg.yield %19 : f32
}
} {id = 16 : i32}
%17 = air.channel.put async [%async_token_71] @channel_4[%arg12, %arg13] (%results_69[%c0_63, %c0_63, %c0_63, %c0_63, %c0_63, %c0_63] [%c1_59, %c1_59, %c2_58, %c4, %c2_58, %c8_61] [%c128_62, %c128_62, %c32_57, %c8_61, %c64, %c1_59]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_72 = air.execute [%async_token_71] {
memref.dealloc %results_65 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_73 = air.execute [%async_token_71] {
memref.dealloc %results_67 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_74 = air.execute [%17] {
memref.dealloc %results_69 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%14 = air.channel.put async [%13] @channel_5[] (%results_38[%c0_27, %c0_27, %c0_27, %c0_27] [%c1_28, %c1_28, %c8_25, %c16_26] [%c128, %c128, %c16_26, %c1_28]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_54 = air.execute [%13] {
memref.dealloc %results_34 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_55 = air.execute [%13] {
memref.dealloc %results_36 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_56 = air.execute [%14] {
memref.dealloc %results_38 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) : (memref<8x32xf32>)
%4 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1) : index, index attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (index) {
%17 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %17 : index
} {id = 7 : i32}
%async_token_24, %results_25 = air.execute -> (index) {
%17 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %17 : index
} {id = 8 : i32}
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%5 = air.channel.get async [%async_token_22, %async_token_26] @channel_0[] (%results_27[] [] []) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28, %results_29 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%6 = air.channel.get async [%async_token_24, %async_token_28] @channel_1[] (%results_29[] [] []) : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_30, %results_31 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%7 = air.wait_all async [%async_token_26, %5]
%8 = air.channel.put async [%async_token_26, %7] @channel_2[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%9 = air.wait_all async [%7, %8]
%10 = air.wait_all async [%async_token_28, %6]
%11 = air.channel.put async [%async_token_28, %10] @channel_3[%c0_20, %c0_20] (%results_29[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%12 = air.wait_all async [%10, %11]
%13 = air.channel.get async [%async_token_30, %async_token_30] @channel_4[%c0_20, %c0_20] (%results_31[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%14 = air.wait_all async [%async_token_30, %13]
%15 = air.herd @herd_0 async [%5, %6, %async_token_30] tile (%arg9, %arg10) in (%arg11=%c1_21, %arg12=%c1_21) attributes {id = 1 : i32} {
%c32_35 = arith.constant 32 : index
%c4_36 = arith.constant 4 : index
%c2_37 = arith.constant 2 : index
%c1_38 = arith.constant 1 : index
%c64_39 = arith.constant 64 : index
%c8_40 = arith.constant 8 : index
%c128_41 = arith.constant 128 : index
%c0_42 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%17 = air.channel.get async [%async_token_43] @channel_2[%arg9, %arg10] (%results_44[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_45, %results_46 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%18 = air.channel.get async [%async_token_45] @channel_3[%arg9, %arg10] (%results_46[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_47, %results_48 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_49 = air.execute [%async_token_47] {
linalg.fill ins(%cst : f32) outs(%results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_50 = air.execute [%async_token_49, %18, %17] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_44, %results_46 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_54: f32, %out: f32):
%20 = arith.mulf %in, %in_54 : f32
%21 = arith.addf %out, %20 : f32
linalg.yield %21 : f32
}
} {id = 16 : i32}
%19 = air.channel.put async [%async_token_50] @channel_4[%arg9, %arg10] (%results_48[%c0_42, %c0_42, %c0_42, %c0_42, %c0_42, %c0_42] [%c1_38, %c1_38, %c2_37, %c4_36, %c2_37, %c8_40] [%c128_41, %c128_41, %c32_35, %c8_40, %c64_39, %c1_38]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_51 = air.execute [%async_token_50] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_52 = air.execute [%async_token_50] {
memref.dealloc %results_46 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_53 = air.execute [%19] {
memref.dealloc %results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%16 = air.channel.put async [%15] @channel_5[] (%results_31[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_32 = air.execute [%15] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_33 = air.execute [%15] {
memref.dealloc %results_29 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_34 = air.execute [%16] {
memref.dealloc %results_31 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
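In the dump above, the air.launch grid is (%arg2=1, %arg3=2) and the per-tile offsets are computed by affine_map<()[s0] -> (s0 * 8)> on %arg0 and affine_map<()[s0] -> (s0 * 16)> on %arg1, so the 8x32 result is covered by two 8x16 column tiles (@channel_5), each fed by an 8x16 slice of the A operand (@channel_0) and a 16x16 slice of the B operand (@channel_1). A minimal Python sketch, not part of the log, that just enumerates those tile offsets; the variable names are assumptions, while the grid sizes and scale factors come from the IR:

# Sketch: enumerate the output tiles implied by the 1x2 air.launch grid
# and the affine offset maps in the dump above.
launch_rows, launch_cols = 1, 2      # (%arg2, %arg3)
tile_m, tile_n = 8, 16               # [%c8, %c16] sizes used by @channel_0/@channel_5

for i in range(launch_rows):
    for j in range(launch_cols):
        row_off = i * 8              # affine_map<()[s0] -> (s0 * 8)>()[%arg0]
        col_off = j * 16             # affine_map<()[s0] -> (s0 * 16)>()[%arg1]
        print(f"tile ({i},{j}): rows {row_off}..{row_off + tile_m - 1}, "
              f"cols {col_off}..{col_off + tile_n - 1}")

# Prints rows 0..7 with cols 0..15 and cols 16..31: the 8x32 matmul result
# is split into two 8x16 tiles along the N dimension, one per launch column.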
// -----// IR Dump Before AIRDependencyCanonicalize (air-dependency-canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
} {id = 1 : i32}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
} {id = 2 : i32}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
} {id = 3 : i32}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
} {id = 4 : i32}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
} {id = 5 : i32}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
} {id = 6 : i32}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 3 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
} {id = 7 : i32}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
} {id = 8 : i32}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) : (memref<8x32xf32>)
%4 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1) : index, index attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (index) {
%17 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg7]
air.execute_terminator %17 : index
} {id = 7 : i32}
%async_token_24, %results_25 = air.execute -> (index) {
%17 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg8]
air.execute_terminator %17 : index
} {id = 8 : i32}
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 9 : i32}
%5 = air.channel.get async [%async_token_22, %async_token_26] @channel_0[] (%results_27[] [] []) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28, %results_29 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
} {id = 10 : i32}
%6 = air.channel.get async [%async_token_24, %async_token_28] @channel_1[] (%results_29[] [] []) : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_30, %results_31 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
} {id = 11 : i32}
%7 = air.wait_all async [%async_token_26, %5]
%8 = air.channel.put async [%async_token_26, %7] @channel_2[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%9 = air.wait_all async [%7, %8]
%10 = air.wait_all async [%async_token_28, %6]
%11 = air.channel.put async [%async_token_28, %10] @channel_3[%c0_20, %c0_20] (%results_29[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%12 = air.wait_all async [%10, %11]
%13 = air.channel.get async [%async_token_30, %async_token_30] @channel_4[%c0_20, %c0_20] (%results_31[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%14 = air.wait_all async [%async_token_30, %13]
%15 = air.herd @herd_0 async [%5, %6, %async_token_30] tile (%arg9, %arg10) in (%arg11=%c1_21, %arg12=%c1_21) attributes {id = 1 : i32} {
%c32_35 = arith.constant 32 : index
%c4_36 = arith.constant 4 : index
%c2_37 = arith.constant 2 : index
%c1_38 = arith.constant 1 : index
%c64_39 = arith.constant 64 : index
%c8_40 = arith.constant 8 : index
%c128_41 = arith.constant 128 : index
%c0_42 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 12 : i32}
%17 = air.channel.get async [%async_token_43] @channel_2[%arg9, %arg10] (%results_44[] [] []) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_45, %results_46 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 13 : i32}
%18 = air.channel.get async [%async_token_45] @channel_3[%arg9, %arg10] (%results_46[] [] []) : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_47, %results_48 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 14 : i32}
%async_token_49 = air.execute [%async_token_47] {
linalg.fill ins(%cst : f32) outs(%results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>)
} {id = 15 : i32}
%async_token_50 = air.execute [%async_token_49, %18, %17] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_44, %results_46 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_54: f32, %out: f32):
%20 = arith.mulf %in, %in_54 : f32
%21 = arith.addf %out, %20 : f32
linalg.yield %21 : f32
}
} {id = 16 : i32}
%19 = air.channel.put async [%async_token_50] @channel_4[%arg9, %arg10] (%results_48[%c0_42, %c0_42, %c0_42, %c0_42, %c0_42, %c0_42] [%c1_38, %c1_38, %c2_37, %c4_36, %c2_37, %c8_40] [%c128_41, %c128_41, %c32_35, %c8_40, %c64_39, %c1_38]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_51 = air.execute [%async_token_50] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 17 : i32}
%async_token_52 = air.execute [%async_token_50] {
memref.dealloc %results_46 : memref<1x1x2x2x8x8xf32, 2 : i32>
} {id = 18 : i32}
%async_token_53 = air.execute [%19] {
memref.dealloc %results_48 : memref<1x1x2x2x4x8xf32, 2 : i32>
} {id = 19 : i32}
air.herd_terminator
}
%16 = air.channel.put async [%15] @channel_5[] (%results_31[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_32 = air.execute [%15] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
} {id = 20 : i32}
%async_token_33 = air.execute [%15] {
memref.dealloc %results_29 : memref<1x1x16x16xf32, 1 : i32>
} {id = 21 : i32}
%async_token_34 = air.execute [%16] {
memref.dealloc %results_31 : memref<1x1x8x16xf32, 1 : i32>
} {id = 22 : i32}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async args(%arg7=%arg0, %arg8=%arg1) : index, index attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg9, %arg10) in (%arg11=%c1_21, %arg12=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg9, %arg10] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg9, %arg10] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg9, %arg10] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
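Across these dumps, the put on @channel_2 reads the L2 tile %results_23 (memref<1x1x8x16xf32, 1 : i32>) with offsets [0, 0, 0, 0], sizes [2, 2, 4, 8] and strides [8, 64, 16, 1], i.e. a 4-D strided access counted in f32 elements. A small Python sketch, illustrative only, that expands that descriptor into linear element offsets; the helper name is an assumption, the numbers are taken verbatim from the IR:

# Sketch: expand the 4-D sizes/strides of the @channel_2 put into linear
# element offsets within the 8x16 (128-element) L2 buffer.
def strided_offsets(sizes, strides, base=0):
    offs = []
    for i0 in range(sizes[0]):
        for i1 in range(sizes[1]):
            for i2 in range(sizes[2]):
                for i3 in range(sizes[3]):
                    offs.append(base + i0 * strides[0] + i1 * strides[1]
                                + i2 * strides[2] + i3 * strides[3])
    return offs

offs = strided_offsets(sizes=[2, 2, 4, 8], strides=[8, 64, 16, 1])
print(len(offs), len(set(offs)), min(offs), max(offs))   # 128 128 0 127

# All 128 elements of the 8x16 tile are sent exactly once, but in a permuted
# order matching the 1x1x2x2x4x8 packed layout the herd receives on
# @channel_2; the @channel_3 put (sizes [2,2,8,8], strides [8,128,16,1])
# does the same for the 16x16 operand.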
// -----// IR Dump Before CSE (cse) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRLabelScfForLoopForPingPongPattern (air-label-scf-for-to-ping-pong) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
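The linalg.generic inside air.herd @herd_0 is the packed micro-kernel: given the indexing maps and iterator types above, it computes C[d0,d1,d4,d3,d6,d7] += A[d0,d2,d5,d3,d6,d8] * B[d2,d1,d4,d5,d8,d7] over the 1x1x2x2x4x8 and 1x1x2x2x8x8 L1 tiles, with d2, d5 and d8 as the reduction loops (1*2*8 = 16 accumulation steps, matching K = 16 of the 8x32x16 matmul). A NumPy sketch of that loop nest, illustrative only; the array names and random inputs are assumptions, while the shapes, maps and bounds come from the IR:

import itertools
import numpy as np

# Operand shapes from the dump (all f32).
A = np.random.rand(1, 1, 2, 2, 4, 8).astype(np.float32)   # %results_40
B = np.random.rand(1, 1, 2, 2, 8, 8).astype(np.float32)   # %results_42
C = np.zeros((1, 1, 2, 2, 4, 8), dtype=np.float32)        # %results_44 after linalg.fill 0.0

# Loop bounds for d0..d8, inferred from the operand shapes and indexing maps.
D = [1, 1, 1, 2, 2, 2, 4, 8, 8]
for d0, d1, d2, d3, d4, d5, d6, d7, d8 in itertools.product(*map(range, D)):
    # maps: A(d0,d2,d5,d3,d6,d8), B(d2,d1,d4,d5,d8,d7), C(d0,d1,d4,d3,d6,d7)
    C[d0, d1, d4, d3, d6, d7] += A[d0, d2, d5, d3, d6, d8] * B[d2, d1, d4, d5, d8, d7]

# C holds 1*1*2*2*4*8 = 128 accumulated values, i.e. one 8x16 output tile in
# the packed layout that the @channel_4 put then streams back out to L2.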
// -----// IR Dump Before AIRPingPongTransformationPattern (air-ping-pong-transform) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRDeAliasMemref (air-dealias-memref) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
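In the dump above, the @channel_2 put streams the 1x1x8x16 L2 tile with sizes [2, 2, 4, 8] and strides [8, 64, 16, 1], while the matching get in the herd fills memref<1x1x2x2x4x8xf32> linearly; together they amount to the pack sketched below. Minimal NumPy sketch with illustrative names; the leading unit dims are dropped.

import numpy as np

a_l2 = np.arange(8 * 16, dtype=np.float32).reshape(8, 16)  # the 1x1x8x16 L2 tile
packed = np.empty((2, 2, 4, 8), dtype=np.float32)          # herd-side buffer, unit dims dropped

for i0 in range(2):
    for i1 in range(2):
        for i2 in range(4):
            for i3 in range(8):
                # offset into the row-major 8x16 buffer, per the put's strides
                packed[i0, i1, i2, i3] = a_l2.flat[i0 * 8 + i1 * 64 + i2 * 16 + i3]

# i.e. packed[i0, i1, i2, i3] == a_l2[i1 * 4 + i2, i0 * 8 + i3]
assert np.array_equal(packed, a_l2.reshape(2, 4, 2, 8).transpose(2, 0, 1, 3))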
// -----// IR Dump Before CSE (cse) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
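The air.launch above runs a 1x2 grid, and the two affine.apply ops compute per-iteration offsets arg0*8 and arg1*16, so each iteration moves an 8x16 slab of the operands and fills one 8x16 slice of the 8x32 result. A small sketch of those offsets (plain Python, names illustrative):

# Launch grid from `in (%arg2=%c1, %arg3=%c2)`: 1 x 2 iterations.
for arg0 in range(1):
    for arg1 in range(2):
        row_off = arg0 * 8    # affine_map<()[s0] -> (s0 * 8)>
        col_off = arg1 * 16   # affine_map<()[s0] -> (s0 * 16)>
        # @channel_0: A[row_off:row_off+8, 0:16]; @channel_1: B[0:16, col_off:col_off+16]
        # @channel_5: C[row_off:row_off+8, col_off:col_off+16]
        print(f"iteration ({arg0},{arg1}): C[{row_off}:{row_off + 8}, {col_off}:{col_off + 16}]")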
// -----// IR Dump Before AIRIsolateAsyncDmaLoopNests (air-isolate-async-dma-loop-nests) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
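On the output path in the dump above, the herd's @channel_4 put walks the packed memref<1x1x2x2x4x8xf32> with sizes [1, 1, 2, 4, 2, 8] and strides [128, 128, 32, 8, 64, 1], while the segment-side @channel_4 get and @channel_5 put treat the 1x1x8x16 L2 tile as a plain row-major 8x16 buffer; that is the unpack sketched below. Minimal NumPy sketch, illustrative names, unit dims dropped.

import numpy as np

packed_c = np.arange(2 * 2 * 4 * 8, dtype=np.float32).reshape(2, 2, 4, 8)
c_l2 = np.empty(8 * 16, dtype=np.float32)   # the 1x1x8x16 L2 tile, written linearly

n = 0
for k0 in range(2):
    for k1 in range(4):
        for k2 in range(2):
            for k3 in range(8):
                # offset into packed_c's 128-element buffer, per the put's strides
                c_l2[n] = packed_c.flat[k0 * 32 + k1 * 8 + k2 * 64 + k3]
                n += 1

# i.e. c_l2.reshape(8, 16)[k0 * 4 + k1, k2 * 8 + k3] == packed_c[k2, k0, k1, k3]
assert np.array_equal(c_l2.reshape(8, 16), packed_c.transpose(1, 2, 0, 3).reshape(8, 16))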
// -----// IR Dump Before AIRSpecializeChannelWrapAndStridePattern (air-specialize-channel-wrap-and-stride) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c64 = arith.constant 64 : index
%c2_17 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c128 = arith.constant 128 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c4, %c8_18] [%c8_18, %c64, %c16_19, %c1_21]) {id = 6 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20, %c0_20] [%c2_17, %c2_17, %c8_18, %c8_18] [%c8_18, %c128, %c16_19, %c1_21]) {id = 7 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 8 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c32_31 = arith.constant 32 : index
%c4_32 = arith.constant 4 : index
%c2_33 = arith.constant 2 : index
%c1_34 = arith.constant 1 : index
%c64_35 = arith.constant 64 : index
%c8_36 = arith.constant 8 : index
%c128_37 = arith.constant 128 : index
%c0_38 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_39] @channel_2[%arg7, %arg8] (%results_40[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_41, %results_42 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_41] @channel_3[%arg7, %arg8] (%results_42[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_43, %results_44 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%async_token_43] {
linalg.fill ins(%cst : f32) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_46 = air.execute [%async_token_45, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_40, %results_42 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_50: f32, %out: f32):
%15 = arith.mulf %in, %in_50 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_46] @channel_4[%arg7, %arg8] (%results_44[%c0_38, %c0_38, %c0_38, %c0_38, %c0_38, %c0_38] [%c1_34, %c1_34, %c2_33, %c4_32, %c2_33, %c8_36] [%c128_37, %c128_37, %c32_31, %c8_36, %c64_35, %c1_34]) {id = 11 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_47 = air.execute [%async_token_46] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_48 = air.execute [%async_token_46] {
memref.dealloc %results_42 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_49 = air.execute [%14] {
memref.dealloc %results_44 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20, %c0_20, %c0_20] [%c1_21, %c1_21, %c8_18, %c16_19] [%c128, %c128, %c16_19, %c1_21]) {id = 12 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c2_17 = arith.constant 2 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20] [%c2_17, %c8_18, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20] [%c2_17, %c16_19, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c2_31 = arith.constant 2 : index
%c1_32 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c8_33 = arith.constant 8 : index
%c0_34 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_35, %results_36 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_35] @channel_2[%arg7, %arg8] (%results_36[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37, %results_38 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_37] @channel_3[%arg7, %arg8] (%results_38[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_41 = air.execute [%async_token_39] {
linalg.fill ins(%cst : f32) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_42 = air.execute [%async_token_41, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_36, %results_38 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_46: f32, %out: f32):
%15 = arith.mulf %in, %in_46 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_42] @channel_4[%arg7, %arg8] (%results_40[%c0_34, %c0_34, %c0_34] [%c8_33, %c2_31, %c8_33] [%c8_33, %c64, %c1_32]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_43 = air.execute [%async_token_42] {
memref.dealloc %results_36 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_44 = air.execute [%async_token_42] {
memref.dealloc %results_38 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%14] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
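In the dump above (taken after air-specialize-channel-wrap-and-stride), the @channel_2 put's pattern has been folded from sizes [2, 2, 4, 8] / strides [8, 64, 16, 1] to [2, 8, 8] / [8, 16, 1], @channel_3's from [2, 2, 8, 8] / [8, 128, 16, 1] to [2, 16, 8] / [8, 16, 1], and @channel_4's from [1, 1, 2, 4, 2, 8] / [128, 128, 32, 8, 64, 1] to [8, 2, 8] / [8, 64, 1]. The sketch below (plain Python, illustrative helper name) checks that each pair of patterns visits the same offsets in the same order:

from itertools import product

def offsets(sizes, strides):
    # Offsets visited by a wrap-and-stride DMA pattern, innermost dim fastest.
    return [sum(i * s for i, s in zip(idx, strides))
            for idx in product(*(range(n) for n in sizes))]

# @channel_2 put: 4-D pattern before specialization vs. 3-D pattern after.
assert offsets([2, 2, 4, 8], [8, 64, 16, 1]) == offsets([2, 8, 8], [8, 16, 1])
# @channel_3 put: 4-D pattern before vs. 3-D pattern after.
assert offsets([2, 2, 8, 8], [8, 128, 16, 1]) == offsets([2, 16, 8], [8, 16, 1])
# @channel_4 put: 6-D pattern before vs. 3-D pattern after (unit dims are no-ops).
assert offsets([1, 1, 2, 4, 2, 8], [128, 128, 32, 8, 64, 1]) == offsets([8, 2, 8], [8, 64, 1])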
// -----// IR Dump Before CSE (cse) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c2_17 = arith.constant 2 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20] [%c2_17, %c8_18, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20] [%c2_17, %c16_19, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c2_31 = arith.constant 2 : index
%c1_32 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c8_33 = arith.constant 8 : index
%c0_34 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_35, %results_36 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_35] @channel_2[%arg7, %arg8] (%results_36[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37, %results_38 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_37] @channel_3[%arg7, %arg8] (%results_38[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_41 = air.execute [%async_token_39] {
linalg.fill ins(%cst : f32) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_42 = air.execute [%async_token_41, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_36, %results_38 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_46: f32, %out: f32):
%15 = arith.mulf %in, %in_46 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_42] @channel_4[%arg7, %arg8] (%results_40[%c0_34, %c0_34, %c0_34] [%c8_33, %c2_31, %c8_33] [%c8_33, %c64, %c1_32]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_43 = air.execute [%async_token_42] {
memref.dealloc %results_36 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_44 = air.execute [%async_token_42] {
memref.dealloc %results_38 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%14] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before AIRCollapseHerdPass (air-collapse-herd) //----- //
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c2_17 = arith.constant 2 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20] [%c2_17, %c8_18, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20] [%c2_17, %c16_19, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c2_31 = arith.constant 2 : index
%c1_32 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c8_33 = arith.constant 8 : index
%c0_34 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_35, %results_36 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_35] @channel_2[%arg7, %arg8] (%results_36[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37, %results_38 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_37] @channel_3[%arg7, %arg8] (%results_38[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_41 = air.execute [%async_token_39] {
linalg.fill ins(%cst : f32) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_42 = air.execute [%async_token_41, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_36, %results_38 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_46: f32, %out: f32):
%15 = arith.mulf %in, %in_46 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_42] @channel_4[%arg7, %arg8] (%results_40[%c0_34, %c0_34, %c0_34] [%c8_33, %c2_31, %c8_33] [%c8_33, %c64, %c1_32]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_43 = air.execute [%async_token_42] {
memref.dealloc %results_36 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_44 = air.execute [%async_token_42] {
memref.dealloc %results_38 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%14] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index
%c8 = arith.constant 8 : index
%c16 = arith.constant 16 : index
%c0_7 = arith.constant 0 : index
%c1_8 = arith.constant 1 : index
%async_token_9, %results_10 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%1 = air.channel.put async [%async_token_9] @channel_0[] (%arg4[%results_10, %c0_7] [%c8, %c16] [%c16, %c1_8]) {id = 1 : i32} : (memref<8x16xf32>)
%async_token_11, %results_12 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%2 = air.channel.put async [%async_token_11] @channel_1[] (%arg5[%c0_7, %results_12] [%c16, %c16] [%c32, %c1_8]) {id = 2 : i32} : (memref<16x32xf32>)
%async_token_13, %results_14 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%arg0]
air.execute_terminator %5 : index
}
%async_token_15, %results_16 = air.execute -> (index) {
%5 = affine.apply affine_map<()[s0] -> (s0 * 16)>()[%arg1]
air.execute_terminator %5 : index
}
%3 = air.channel.get async [%async_token_13, %async_token_15] @channel_5[] (%arg6[%results_14, %results_16] [%c8, %c16] [%c32, %c1_8]) {id = 3 : i32} : (memref<8x32xf32>)
%4 = air.segment @segment_0 async attributes {id = 2 : i32} {
%c2_17 = arith.constant 2 : index
%c8_18 = arith.constant 8 : index
%c16_19 = arith.constant 16 : index
%c0_20 = arith.constant 0 : index
%c1_21 = arith.constant 1 : index
%async_token_22, %results_23 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%5 = air.channel.get async [%async_token_22] @channel_0[] (%results_23[] [] []) {id = 4 : i32} : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_24, %results_25 = air.execute -> (memref<1x1x16x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x16x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x16x16xf32, 1 : i32>
}
%6 = air.channel.get async [%async_token_24] @channel_1[] (%results_25[] [] []) {id = 5 : i32} : (memref<1x1x16x16xf32, 1 : i32>)
%async_token_26, %results_27 = air.execute -> (memref<1x1x8x16xf32, 1 : i32>) {
%alloc = memref.alloc() : memref<1x1x8x16xf32, 1 : i32>
air.execute_terminator %alloc : memref<1x1x8x16xf32, 1 : i32>
}
%7 = air.channel.put async [%5] @channel_2[%c0_20, %c0_20] (%results_23[%c0_20, %c0_20, %c0_20] [%c2_17, %c8_18, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%8 = air.channel.put async [%6] @channel_3[%c0_20, %c0_20] (%results_25[%c0_20, %c0_20, %c0_20] [%c2_17, %c16_19, %c8_18] [%c8_18, %c16_19, %c1_21]) : (memref<1x1x16x16xf32, 1 : i32>)
%9 = air.channel.get async [%async_token_26] @channel_4[%c0_20, %c0_20] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%10 = air.herd @herd_0 async [%5, %6, %async_token_26] tile (%arg7, %arg8) in (%arg9=%c1_21, %arg10=%c1_21) attributes {id = 3 : i32} {
%c2_31 = arith.constant 2 : index
%c1_32 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c8_33 = arith.constant 8 : index
%c0_34 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%async_token_35, %results_36 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%12 = air.channel.get async [%async_token_35] @channel_2[%arg7, %arg8] (%results_36[] [] []) {id = 9 : i32} : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_37, %results_38 = air.execute -> (memref<1x1x2x2x8x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x8x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%13 = air.channel.get async [%async_token_37] @channel_3[%arg7, %arg8] (%results_38[] [] []) {id = 10 : i32} : (memref<1x1x2x2x8x8xf32, 2 : i32>)
%async_token_39, %results_40 = air.execute -> (memref<1x1x2x2x4x8xf32, 2 : i32>) {
%alloc = memref.alloc() : memref<1x1x2x2x4x8xf32, 2 : i32>
air.execute_terminator %alloc : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_41 = air.execute [%async_token_39] {
linalg.fill ins(%cst : f32) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>)
}
%async_token_42 = air.execute [%async_token_41, %13, %12] {
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%results_36, %results_38 : memref<1x1x2x2x4x8xf32, 2 : i32>, memref<1x1x2x2x8x8xf32, 2 : i32>) outs(%results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>) {
^bb0(%in: f32, %in_46: f32, %out: f32):
%15 = arith.mulf %in, %in_46 : f32
%16 = arith.addf %out, %15 : f32
linalg.yield %16 : f32
}
}
%14 = air.channel.put async [%async_token_42] @channel_4[%arg7, %arg8] (%results_40[%c0_34, %c0_34, %c0_34] [%c8_33, %c2_31, %c8_33] [%c8_33, %c64, %c1_32]) : (memref<1x1x2x2x4x8xf32, 2 : i32>)
%async_token_43 = air.execute [%async_token_42] {
memref.dealloc %results_36 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
%async_token_44 = air.execute [%async_token_42] {
memref.dealloc %results_38 : memref<1x1x2x2x8x8xf32, 2 : i32>
}
%async_token_45 = air.execute [%14] {
memref.dealloc %results_40 : memref<1x1x2x2x4x8xf32, 2 : i32>
}
air.herd_terminator
}
%11 = air.channel.put async [%10] @channel_5[] (%results_27[%c0_20, %c0_20] [%c8_18, %c16_19] [%c16_19, %c1_21]) : (memref<1x1x8x16xf32, 1 : i32>)
%async_token_28 = air.execute [%7] {
memref.dealloc %results_23 : memref<1x1x8x16xf32, 1 : i32>
}
%async_token_29 = air.execute [%8] {
memref.dealloc %results_25 : memref<1x1x16x16xf32, 1 : i32>
}
%async_token_30 = air.execute [%11] {
memref.dealloc %results_27 : memref<1x1x8x16xf32, 1 : i32>
}
air.segment_terminator
}
air.launch_terminator
}
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
module {
air.channel @channel_5 [1, 1]
air.channel @channel_4 [1, 1]
air.channel @channel_3 [1, 1]
air.channel @channel_2 [1, 1]
air.channel @channel_1 [1, 1]
air.channel @channel_0 [1, 1]
func.func @matmul_static_dispatch_0_matmul_8x32x16_f32() {
%c2 = arith.constant 2 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%async_token, %results = air.execute -> (memref<8x16xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<8x16xf32>
air.execute_terminator %1 : memref<8x16xf32>
}
%async_token_0 = air.execute [%async_token] {
memref.assume_alignment %results, 64 : memref<8x16xf32>
}
%async_token_1, %results_2 = air.execute -> (memref<16x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<16x32xf32>
air.execute_terminator %1 : memref<16x32xf32>
}
%async_token_3 = air.execute [%async_token_1] {
memref.assume_alignment %results_2, 64 : memref<16x32xf32>
}
%async_token_4, %results_5 = air.execute -> (memref<8x32xf32>) {
%1 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<8x32xf32>
air.execute_terminator %1 : memref<8x32xf32>
}
%async_token_6 = air.execute [%async_token_4] {
memref.assume_alignment %results_5, 64 : memref<8x32xf32>
}
%0 = air.launch async [%async_token_0, %async_token_3, %async_token_6] (%arg0, %arg1) in (%arg2=%c1, %arg3=%c2) args(%arg4=%results, %arg5=%results_2, %arg6=%results_5) : memref<8x16xf32>, memref<16x32xf32>, memref<8x32xf32> attributes {id = 1 : i32} {
%c32 = arith.constant 32 : index