BMM PM issue solved by disable-loop-unrolling Peano flag

(Note: this IR dump log has been truncated.)
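For context, the log below traces iree-compile lowering a bf16 linalg.batch_matmul for the AMD AIE target ("amd-aie", device "npu1_4col", XRT/xclbin), pass by pass. A minimal reproduction sketch follows; only iree-compile itself, the input IR, and the -disable-loop-unrolling flag named in the title come from this gist, so the exact spelling of the Peano pass-through option below is an assumption and should be checked against `iree-compile --help` for the iree-amd-aie build in use.

```python
# Minimal sketch, assuming an iree-amd-aie build that exposes a pass-through
# option forwarding extra flags to Peano (the AIE backend compiler). The
# option name below is hypothetical; -disable-loop-unrolling is the flag
# the gist title credits with fixing the BMM PM issue.
import subprocess

subprocess.run(
    [
        "iree-compile",
        "batch_matmul.mlir",  # the tensor<1x128x256xbf16> x tensor<1x256x128xbf16> input
        "--iree-hal-target-backends=amd-aie",
        # Hypothetical pass-through forwarding -disable-loop-unrolling to Peano:
        "--iree-amd-aie-additional-peano-opt-flags=-disable-loop-unrolling",
        "-o", "batch_matmul.vmfb",
    ],
    check=True,
)
```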
// -----// IR Dump Before AutoInputConversionPipelinePass (iree-auto-input-conversion) //----- //
module {
  func.func @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before IREEImportPublicPass (iree-import-public) //----- //
module {
  func.func @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before ImportMLProgramPass (iree-import-ml-program) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before SanitizeModuleNamesPass (iree-sanitize-module-names) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before ConvertMeshToFlowPass (iree-convert-mesh-to-flow) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before DemoteF64ToF32Pass (iree-input-conversion-demote-f64-to-f32) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before mlir::iree_compiler::IREE::ABI::ConvertStreamableOpsPass (iree-abi-convert-streamable-ops) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before mlir::iree_compiler::IREE::ABI::WrapEntryPointsPass (iree-abi-wrap-entry-points) //----- //
module {
  util.func public @batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before Inliner (inline) //----- //
module {
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = util.call @_batch_matmul(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
  util.func private @_batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x128x128xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    util.return %2 : tensor<1x128x128xf32>
  }
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func private @_batch_matmul(%arg0: tensor<1x128x256xbf16>, %arg1: tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = tensor.empty() : tensor<1x128x128xf32>
  %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%1 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  util.return %2 : tensor<1x128x128xf32>
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = util.call @_batch_matmul(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %cst = arith.constant 0.000000e+00 : f32
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- //
module {
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- //
module {
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {hal.device.targets = [#device_target_xrt]} {
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before LinalgQuantizedConvToConvPass (iree-global-opt-quantized-conv-to-conv) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before LinalgQuantizedMatmulToMatmulPass (iree-global-opt-quantized-matmul-to-matmul) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before RemoveZeroExtentTensorsPass (iree-global-opt-remove-zero-extent-tensors) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before DetachElementwiseFromNamedOpsPass (iree-global-opt-detach-elementwise-from-named-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before LinalgNamedOpConversionPass (linalg-named-op-conversion) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before Convert1X1FilterConv2DToMatmulPass (iree-global-opt-convert-1x1-filter-conv2d-to-matmul) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before EraseUnusedLinalgOperandsPass (iree-global-opt-erase-unused-linalg-operands) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before ExpandTensorShapesPass (iree-global-opt-expand-tensor-shapes) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before ConvertElementwiseToLinalgPass (convert-elementwise-to-linalg) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before DecomposeConcatPass (iree-global-opt-decompose-concat) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before FoldUnitExtentDimsPass (iree-dispatch-creation-fold-unit-extent-dims) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before DemoteContractionInputsToBF16Pass (iree-global-opt-demote-contraction-inputs-to-bf16) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SetEncodingPass (iree-dispatch-creation-set-encoding) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before MaterializeHomogeneousEncodingsPass (iree-global-opt-materialize-homogeneous-encodings) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = iree_encoding.set_encoding %0 : tensor<1x128x256xbf16> -> tensor<1x128x256xbf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
    %3 = iree_encoding.set_encoding %1 : tensor<1x256x128xbf16> -> tensor<1x256x128xbf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
    %4 = tensor.empty() : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
    %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
    %6 = linalg.batch_matmul ins(%2, %3 : tensor<1x128x256xbf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>, tensor<1x256x128xbf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) outs(%5 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>>
    %7 = iree_encoding.unset_encoding %6 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [#map, #map1, #map2], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<1x128x128xf32>
    %8 = hal.tensor.export %7 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %8 : !hal.buffer_view
  }
}
// -----// IR Dump Before MaterializeEncodingIntoNopPass (iree-codegen-materialize-encoding-into-nop) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = iree_encoding.set_encoding %0 : tensor<1x128x256xbf16> -> tensor<1x128x256xbf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %3 = iree_encoding.set_encoding %1 : tensor<1x256x128xbf16> -> tensor<1x256x128xbf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %4 = tensor.empty() : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %5 = linalg.fill ins(%cst : f32) outs(%4 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %6 = linalg.batch_matmul ins(%2, %3 : tensor<1x128x256xbf16, #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>, tensor<1x256x128xbf16, #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) outs(%5 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>) -> tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>>
  %7 = iree_encoding.unset_encoding %6 : tensor<1x128x128xf32, #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [bf16, bf16, f32], user_indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], round_dims_to = array<i64: 32, 32, 32>>> -> tensor<1x128x128xf32>
  %8 = hal.tensor.export %7 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %8 : !hal.buffer_view
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before SimplifyPackUnpackPass (iree-global-opt-simplify-pack-unpack) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before DataLayoutPropagationPass (iree-global-opt-data-layout-propagation) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before GeneralizeLinalgNamedOpsPass (iree-global-opt-generalize-linalg-named-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before GlobalLoopInvariantCodeMotionPass (iree-global-opt-loop-invariant-code-motion) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before HoistIntoGlobals (iree-util-hoist-into-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before JitGlobalsPass (iree-consteval-jit-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before RaiseSpecialOpsPass (iree-global-opt-raise-special-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before TensorPadToTensorInsertSlicePass (iree-dispatch-creation-tensor-pad-to-tensor-insert-slice) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = tensor.empty() : tensor<1x128x128xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %5 : !hal.buffer_view
  }
}
// -----// IR Dump Before FusionPreprocessingPass (iree-dispatch-creation-fusion-preprocessing) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before BubbleUpExpandShapesPass (iree-dispatch-creation-bubble-up-expand-shapes) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before BubbleUpExtractSlicesPass (iree-dispatch-creation-bubble-up-extract-slices) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before ElementwiseOpFusionPass (iree-dispatch-creation-elementwise-op-fusion) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SinkReshapesPass (iree-dispatch-creation-sink-reshapes) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before FuseMultiUseElementwiseProducerPass (iree-dispatch-creation-fuse-multi-use-elementwise-producer) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before SplitReductionPass (iree-dispatch-creation-split-reduction-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before TransposeGenericOpsPass (iree-dispatch-creation-transpose-generic-ops) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before FormScalarDispatchesPass (iree-dispatch-creation-form-scalar-dispatches) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before FormDispatchRegionsPass (iree-dispatch-creation-form-dispatch-regions) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CloneProducersIntoDispatchRegionsPass (iree-dispatch-creation-clone-producers-into-dispatch-regions) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
  %4 = flow.dispatch.region -> (tensor<1x128x128xf32>) {
    %6 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.return %6 : tensor<1x128x128xf32>
  }
  %5 = hal.tensor.export %4 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %5 : !hal.buffer_view
}
// -----// IR Dump Before CollapseDimensionsPass (iree-dispatch-creation-collapse-dimensions) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = flow.dispatch.region -> (tensor<1x128x128xf32>) {
    %5 = tensor.empty() : tensor<1x128x128xf32>
    %cst_0 = arith.constant 0.000000e+00 : f32
    %6 = linalg.fill ins(%cst_0 : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %7 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.return %7 : tensor<1x128x128xf32>
  }
  %4 = hal.tensor.export %3 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before ConvertDispatchRegionsToWorkgroupsPass (iree-dispatch-creation-convert-dispatch-regions-to-workgroups) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = flow.dispatch.region -> (tensor<1x128x128xf32>) {
    %5 = tensor.empty() : tensor<1x128x128xf32>
    %cst_0 = arith.constant 0.000000e+00 : f32
    %6 = linalg.fill ins(%cst_0 : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %7 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.return %7 : tensor<1x128x128xf32>
  }
  %4 = hal.tensor.export %3 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before ConvertTensorToFlowPass (iree-dispatch-creation-convert-tensor-to-flow) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = tensor.empty() : tensor<1x128x128xf32>
  %3 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %5 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %6 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %7 = tensor.empty() : tensor<1x128x128xf32>
    %cst_0 = arith.constant 0.000000e+00 : f32
    %8 = linalg.fill ins(%cst_0 : f32) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %9 = linalg.batch_matmul ins(%5, %6 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%8 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %9, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  }
  %4 = hal.tensor.export %3 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
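// NOTE: ConvertDispatchRegionsToWorkgroupsPass has rewritten the flow.dispatch.region into a
// flow.dispatch.workgroups op whose body accesses its operands and result through explicit
// flow.dispatch.tensor.load / flow.dispatch.tensor.store ops on !flow.dispatch.tensor bindings.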
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
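// NOTE: The dead tensor.empty and the duplicate arith.constant that remained outside the
// workgroups op in the previous dump have been cleaned up; only the HAL import/export ops and the
// dispatch itself are left in the function body.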
// -----// IR Dump Before MaterializeDefaultWorkgroupCountRegionPass (iree-dispatch-creation-materialize-default-workgroup-count-region) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before VerifyInputLegalityPass (iree-verify-input-legality) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
        (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
      %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
      %6 = tensor.empty() : tensor<1x128x128xf32>
      %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
      flow.return
    } count() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
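// NOTE: MaterializeDefaultWorkgroupCountRegionPass has attached the count() region, whose
// flow.dispatch.workgroup_count_from_slice placeholder leaves the actual workgroup-count
// computation to be resolved later during codegen. This full-module dump also shows the
// amd-aie/xrt target configuration (npu1_4col, ukernels = "none") the dispatch will be compiled
// under.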
// -----// IR Dump Before CaptureDynamicDimsPass (iree-flow-capture-dynamic-dims) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before InitializeEmptyTensorsPass (iree-flow-initialize-empty-tensors) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
      (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
    %cst = arith.constant 0.000000e+00 : f32
    %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
    %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
    %6 = tensor.empty() : tensor<1x128x128xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
    flow.return
  } count() -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
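// NOTE: The four dumps above (before CaptureDynamicDims, canonicalize, CSE, and
// InitializeEmptyTensors) are identical, as is the next one: with fully static shapes and no
// stray empty tensors at the function level, none of these passes has anything to change here.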
// -----// IR Dump Before OutlineDispatchExternsPass (iree-flow-outline-dispatch-externs) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
        (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
      %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
      %6 = tensor.empty() : tensor<1x128x128xf32>
      %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
      flow.return
    } count() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before OutlineDispatchRegionsPass (iree-flow-outline-dispatch-regions) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch.workgroups(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> =
        (%arg2: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg3: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg4: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %4 = flow.dispatch.tensor.load %arg2, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
      %5 = flow.dispatch.tensor.load %arg3, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
      %6 = tensor.empty() : tensor<1x128x128xf32>
      %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      %8 = linalg.batch_matmul ins(%4, %5 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%7 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      flow.dispatch.tensor.store %8, %arg4, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
      flow.return
    } count() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before AnnotateDispatchesPass (iree-flow-annotate-dispatches) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
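// NOTE: OutlineDispatchRegionsPass has hoisted the workgroup body into the flow.executable
// @batch_matmul_dispatch_0 with a public export, and the entry function now invokes it via
// flow.dispatch instead of carrying the body inline.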
// -----// IR Dump Before StripDebugOps (iree-util-strip-debug-ops) //----- //
flow.executable private @batch_matmul_dispatch_0 {
  flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
    %x, %y, %z = flow.dispatch.workgroup_count_from_slice
    flow.return %x, %y, %z : index, index, index
  }
  builtin.module {
    func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
      %cst = arith.constant 0.000000e+00 : f32
      %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
      %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
      %2 = tensor.empty() : tensor<1x128x128xf32>
      %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
      return
    }
  }
}
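// NOTE: AnnotateDispatchesPass has renamed the export to
// @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32, encoding the root op, its
// iteration-space sizes, and the operand/result element types so the dispatch is easy to
// identify in traces and later dumps.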
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before DeduplicateExecutablesPass (iree-flow-deduplicate-executables) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
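// NOTE: With only a single executable in the module there is nothing for
// DeduplicateExecutablesPass to fold; the tracing and cleanup dumps that follow show the same IR.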
// -----// IR Dump Before InjectTensorTracingPass (iree-flow-inject-tensor-tracing) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before CleanupTensorShapesPass (iree-flow-cleanup-tensor-shapes) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before OutlineConstantsPass (iree-flow-outline-constants) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
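// NOTE: The FixedPointIterator driver has tagged the module with
// iree.fixedpoint.iteration = 0 : index while it re-runs the cleanup passes to a fixed point.
// There are no constants eligible for outlining here (only the scalar zero inside the
// executable), so OutlineConstantsPass appears to leave the module unchanged.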
// -----// IR Dump Before CanonicalizerPass (iree-flow-canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
  %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
  %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
  %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
  util.return %3 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  flow.executable private @batch_matmul_dispatch_0 {
    flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      flow.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) {
        %cst = arith.constant 0.000000e+00 : f32
        %0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %2 = tensor.empty() : tensor<1x128x128xf32>
        %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16>
    %1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16>
    %2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32>
    %3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view
    util.return %3 : !hal.buffer_view
  }
}
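// NOTE: ApplyPatterns, FoldGlobals, FuseGlobals, and IPO all left the module unchanged; the
// fixed-point iteration converged after its first pass over the module, and the
// iree.fixedpoint.iteration marker has been dropped again before SymbolDCE runs.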
// -----// IR Dump Before VerifyInputPass (iree-stream-verify-input) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
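// NOTE: The export name batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32
// appears to encode the problem as batch=1, M=128, N=128, K=256 with
// bf16 x bf16 operands accumulating into f32, which matches the
// tensor<1x128x256xbf16> x tensor<1x256x128xbf16> -> tensor<1x128x128xf32>
// linalg.batch_matmul above.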
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
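// NOTE: Canonicalizer and CSE run here on individual functions, so these two
// dumps print only the public util.func rather than the whole module; the
// dispatch executable reappears once the next module-level pass is dumped.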
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before ConvertToStreamPass (iree-stream-conversion) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
flow.executable private @batch_matmul_dispatch_0 { | |
flow.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
flow.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>, %arg1: !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>, %arg2: !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%0 = flow.dispatch.tensor.load %arg0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%1 = flow.dispatch.tensor.load %arg1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%2 = tensor.empty() : tensor<1x128x128xf32> | |
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%4 = linalg.batch_matmul ins(%0, %1 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%3 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %4, %arg2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%0 = hal.tensor.import %arg0 "input0" : !hal.buffer_view -> tensor<1x128x256xbf16> | |
%1 = hal.tensor.import %arg1 "input1" : !hal.buffer_view -> tensor<1x256x128xbf16> | |
%2 = flow.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0, %1) : (tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) -> tensor<1x128x128xf32> | |
%3 = hal.tensor.export %2 "output0" : tensor<1x128x128xf32> -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
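// NOTE: ConvertToStreamPass runs next, and the following dump (before
// VerifyLoweringToTensorsPass) shows its effect: flow.executable becomes
// stream.executable, the dispatch arguments become opaque !stream.binding
// values read back through stream.binding.subspan, and the public function now
// spells out the host ABI explicitly with hal.buffer_view.assert shape/type
// checks, stream.tensor.import/export, stream.async.transfer copies, and
// stream.tensor.sizeof computations that size every resource.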
// -----// IR Dump Before VerifyLoweringToTensorsPass (iree-stream-verify-lowering-to-tensors) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c1 = arith.constant 1 : index | |
%c128 = arith.constant 128 : index | |
%c256 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_bf16_0 = hal.element_type<bf16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
%c1_2 = arith.constant 1 : index | |
%c256_3 = arith.constant 256 : index | |
%c128_4 = arith.constant 128 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1_2, %c256_3, %c128_4]) type(%element_type_bf16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%c0 = arith.constant 0 : index | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
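// NOTE: For these static shapes the stream.tensor.sizeof results are
// compile-time constants: each bf16 input occupies 1*128*256*2 = 65536 bytes
// and the f32 output occupies 1*128*128*4 = 65536 bytes; those sizes feed both
// the resource allocations and the dispatch operand ranges [%c0 to %N for %N].
// Below is a minimal, hypothetical sketch (1-D shape chosen only for brevity)
// of the binding pattern the conversion introduced inside the dispatch:
func.func @binding_sketch(%arg0: !stream.binding) -> tensor<4xf32> {
  %c0 = arith.constant 0 : index
  // Recover a typed tensor view from the opaque binding, then load from it.
  %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<4xf32>>
  %1 = flow.dispatch.tensor.load %0, offsets = [0], sizes = [4], strides = [1] : !flow.dispatch.tensor<readonly:tensor<4xf32>> -> tensor<4xf32>
  return %1 : tensor<4xf32>
}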
// -----// IR Dump Before Inliner (inline) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c1 = arith.constant 1 : index | |
%c128 = arith.constant 128 : index | |
%c256 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_bf16_0 = hal.element_type<bf16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
%c1_2 = arith.constant 1 : index | |
%c256_3 = arith.constant 256 : index | |
%c128_4 = arith.constant 128 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1_2, %c256_3, %c128_4]) type(%element_type_bf16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%c0 = arith.constant 0 : index | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
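// NOTE: The Inliner appears to have no work to do here: the only functions are
// the public ABI wrapper and the dispatch body inside the executable, and
// neither is reached through an inlinable call, so the per-function dumps that
// follow start from identical IR.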
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%cst = arith.constant 0.000000e+00 : f32 | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
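// NOTE: Canonicalize now runs once per function: the dump above is the dispatch
// function and the dump below is the public entry point. The dump after that
// shows canonicalize's result on the entry point, with the duplicate index
// constants (%c1_2, %c256_3, %c128_4) folded into %c1, %c256, and %c128 and
// the remaining constants hoisted to the top of the block.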
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
%c1 = arith.constant 1 : index | |
%c128 = arith.constant 128 : index | |
%c256 = arith.constant 256 : index | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_bf16_0 = hal.element_type<bf16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
%c1_2 = arith.constant 1 : index | |
%c256_3 = arith.constant 256 : index | |
%c128_4 = arith.constant 128 : index | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1_2, %c256_3, %c128_4]) type(%element_type_bf16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%c0 = arith.constant 0 : index | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_bf16_0 = hal.element_type<bf16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
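// NOTE: A minimal, hypothetical sketch of the rewrite visible above, where
// duplicate arith.constant ops collapse to a single SSA value and constants
// move to the top of the block:
func.func @dedup_sketch() -> index {
  %c128 = arith.constant 128 : index
  %c128_0 = arith.constant 128 : index  // canonicalize folds this into %c128
  %0 = arith.addi %c128, %c128_0 : index
  return %0 : index
}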
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
%element_type_bf16_0 = hal.element_type<bf16> : i32 | |
%dense_row_major_1 = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16_0) encoding(%dense_row_major_1) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
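// NOTE: The next dump shows what CSE removed: the second copies of the
// side-effect-free hal.element_type and hal.encoding_type ops
// (%element_type_bf16_0 and %dense_row_major_1) are gone, and both
// hal.buffer_view.assert checks now reuse %element_type_bf16 and
// %dense_row_major.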
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
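// NOTE: As with the flow-level module earlier, the stream-level module has
// reached a fixed point for the util cleanup passes: the dumps before
// FoldGlobals, FuseGlobals, IPO, and CombineInitializers repeat it verbatim.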
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before CombineInitializers (iree-util-combine-initializers) //----- // | |
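// NOTE: iree-util-combine-initializers merges multiple util.initializer ops into one.
// This module has no initializers, so the pass is a no-op and the dump below is
// identical to the previous one.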
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before EncodeDeviceTensorsPass (iree-stream-encode-device-tensors) //----- // | |
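// NOTE: from here the dump is scoped to the stream.executable rather than the whole
// module. iree-stream-encode-device-tensors lowers tensor encodings on the device
// side of the stream dialect; with plain dense row-major bf16/f32 tensors there is
// presumably nothing special to encode.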
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
// -----// IR Dump Before EncodeHostTensorsPass (iree-stream-encode-host-tensors) //----- // | |
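// NOTE: iree-stream-encode-host-tensors expands host-side stream.tensor.* ops into
// concrete byte-size calculations. This is the pass that replaces the three
// stream.tensor.sizeof ops below with arith.constant values, as the next dump shows.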
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x256xbf16> : index | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%0} | |
%2 = stream.async.transfer %1 : !stream.resource<external>{%0} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%0} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%3 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x256x128xbf16> : index | |
%4 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%3} | |
%5 = stream.async.transfer %4 : !stream.resource<external>{%3} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%3} | |
%6 = stream.tensor.sizeof on(#hal.device.affinity<@__device_0>) tensor<1x128x128xf32> : index | |
%7 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%2[%c0 to %0 for %0], %5[%c0 to %3 for %3]) : (!stream.resource<*>{%0}, !stream.resource<*>{%3}) -> !stream.resource<*>{%6} | |
%8 = stream.async.transfer %7 : !stream.resource<*>{%6} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%6} | |
%9 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %8 : tensor<1x128x128xf32> in !stream.resource<external>{%6} -> !hal.buffer_view | |
util.return %9 : !hal.buffer_view | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
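// NOTE: the sizeof ops have now been folded to a single %c65536: all three resources
// happen to be 65536 bytes (the 1x128x256 and 1x256x128 bf16 tensors are 32768
// elements x 2 bytes; the 1x128x128 f32 result is 16384 elements x 4 bytes), so one
// constant serves every resource size in this function.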
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before CSE (cse) //----- // | |
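// NOTE: CSE deduplicates identical side-effect-free ops. The constants were already
// uniqued above, so the function below is unchanged.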
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
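// NOTE: iree-util-simplify-global-accesses hoists and deduplicates util.global
// loads/stores within each function. The only global, @__device_0, is referenced via
// #hal.device.affinity attributes rather than explicit loads, so there is nothing to
// simplify.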
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
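// NOTE: iree-util-apply-patterns runs the util dialect's folding/cleanup patterns
// over the whole module (hence the full-module dump below); the IR is already in
// canonical form and comes through unchanged.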
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
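// NOTE: iree-util-fold-globals inlines immutable globals with known constant values
// and drops unused ones. @__device_0 presumably has to remain a global, since it is
// the device handle the affinity attributes refer to, so the module is unchanged.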
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
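// NOTE: iree-util-fuse-globals merges globals that always hold the same value; with
// a single global there are no fusion candidates.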
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
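// NOTE: the util cleanup pipeline (IPO and the passes above) appears to run again to
// reach a fixed point; the IR was already stable after the first round, so the dump
// below repeats the previous one.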
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before VerifyLoweringToAsyncResourcesPass (iree-stream-verify-lowering-to-async-resources) //----- // | |
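// NOTE: a verification-only pass that checks the program has been fully lowered into
// stream async resource ops; it performs no rewrites.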
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before MaterializeCopyOnWritePass (iree-stream-materialize-copy-on-write) //----- // | |
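// NOTE: iree-stream-materialize-copy-on-write inserts clone ops where a resource
// would be mutated while still having other uses. Every value here is produced fresh
// (two imports and a dispatch result), so no copies are materialized.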
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ElideAsyncCopiesPass (iree-stream-elide-async-copies) //----- // | |
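// NOTE: iree-stream-elide-async-copies removes clones/copies that analysis proves
// redundant. The stream.async.transfer ops here are same-affinity
// (@__device_0 -> @__device_0) and survive this pass; they only fold away after
// usage refinement further below.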
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before EmplaceAllocationsPass (iree-stream-emplace-allocations) //----- // | |
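// NOTE: iree-stream-emplace-allocations tries to route dispatch results directly
// into their final target resources to avoid extra allocations; with a single
// dispatch feeding a plain export there is presumably nothing to emplace here.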
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
// -----// IR Dump Before RefineUsagePass (iree-stream-refine-usage) //----- // | |
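// NOTE: iree-stream-refine-usage replaces the placeholder !stream.resource<*> types
// with concrete lifetimes (external, transient, ...) based on how each value is
// used, which also lets the same-device transfers fold away, as the next dump shows.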
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
%1 = stream.async.transfer %0 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%2 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%3 = stream.async.transfer %2 : !stream.resource<external>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<*>{%c65536} | |
%4 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%1[%c0 to %c65536 for %c65536], %3[%c0 to %c65536 for %c65536]) : (!stream.resource<*>{%c65536}, !stream.resource<*>{%c65536}) -> !stream.resource<*>{%c65536} | |
%5 = stream.async.transfer %4 : !stream.resource<*>{%c65536} from(#hal.device.affinity<@__device_0>) -> to(#hal.device.affinity<@__device_0>) !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
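// Comparing this dump with the one above it shows the effect of RefineUsagePass: the wildcard !stream.resource<*> values were refined to the concrete <external> lifetime, at which point the two same-affinity stream.async.transfer ops became no-ops and were dropped, so the imports now feed the dispatch directly. A toy model of that rewrite (illustrative only, not IREE's implementation):

# Toy model of the transfer elision seen above: a transfer whose source and
# destination affinities match (and whose types now agree) forwards its input.
def fold_transfer(value, src_affinity, dst_affinity):
    if src_affinity == dst_affinity:
        return value  # no-op transfer: uses are rewired to the source value
    return ("transfer", value, dst_affinity)

assert fold_transfer("%0", "@__device_0", "@__device_0") == "%0"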
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before VerifyAsyncAccessRangesPass (iree-stream-verify-async-access-ranges) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
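// Note that the module is identical across the ApplyPatterns, FoldGlobals, FuseGlobals, and IPO dumps above: with a single global (@__device_0) and a single public function there is nothing to fold, fuse, or propagate. The VerifyAsyncAccessRangesPass that runs next checks the operand ranges on stream.async.dispatch; a range written %0[%c0 to %c65536 for %c65536] reads as offset %c0, end %c65536, length %c65536, and must stay inside the declared resource size. A minimal sketch of that bounds check (assumed semantics, not IREE's actual verifier code):

# Sketch of the in-bounds condition for a range "[offset to end for length]".
def range_in_bounds(offset, end, length, resource_size):
    return 0 <= offset and offset + length == end and end <= resource_size

# The dispatch operands above: [%c0 to %c65536 for %c65536] on a 65536-byte resource.
assert range_in_bounds(0, 65536, 65536, 65536)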
// -----// IR Dump Before ScheduleExecutionPass (iree-stream-schedule-execution) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.async.dispatch on(#hal.device.affinity<@__device_0>) @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%0[%c0 to %c65536 for %c65536], %1[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ScheduleConcurrencyPass (iree-stream-schedule-concurrency) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %4 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
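// This dump shows ScheduleExecutionPass's output: the lone dispatch has been wrapped in a stream.async.execute region that captures the two imported resources and yields a !stream.timepoint, which the export then awaits. As a rough host-side analogy (hypothetical Python model, not IREE's runtime API), the timepoint plays the role of a future:

# Rough analogy: stream.async.execute submits work and returns a "timepoint";
# stream.timepoint.await blocks until it resolves.
from concurrent.futures import ThreadPoolExecutor

def dispatch(lhs, rhs):  # stand-in for @batch_matmul_dispatch_0
    return [l + r for l, r in zip(lhs, rhs)]

with ThreadPoolExecutor(max_workers=1) as pool:
    result_timepoint = pool.submit(dispatch, [1, 2], [3, 4])  # async.execute
    results = result_timepoint.result()                       # timepoint.await
assert results == [4, 6]

// With only one dispatch in the region there is nothing for the ScheduleConcurrencyPass that follows to group into a stream.async.concurrent op, so the next dump is structurally unchanged.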
// -----// IR Dump Before PropagateTimepointsPass (iree-stream-propagate-timepoints) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %4 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before MaterializeBuiltinsPass (iree-stream-materialize-builtins) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.timepoint.immediate => !stream.timepoint | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%7 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %7 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%2 = stream.timepoint.immediate => !stream.timepoint | |
%3 = stream.timepoint.immediate => !stream.timepoint | |
%4 = stream.timepoint.join max(%2, %3) => !stream.timepoint | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) await(%4) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%7 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %7 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%5 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%6 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %5 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %6 : !hal.buffer_view | |
} | |
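// The pair of stream.timepoint.immediate ops and the join max(%2, %3) feeding await(%4) were inserted by PropagateTimepointsPass for the two imported resources. Since both are already-resolved (immediate) timepoints, the canonicalizer folds the join to an immediate and drops the await clause, as the next dump shows. A minimal model of that fold (illustrative only):

# Minimal model of the fold: joining only immediate (already-signaled)
# timepoints yields an immediate timepoint, so awaiting it is a no-op.
IMMEDIATE = "immediate"

def join_max(*timepoints):
    pending = tuple(t for t in timepoints if t != IMMEDIATE)
    return IMMEDIATE if not pending else ("join", pending)

assert join_max(IMMEDIATE, IMMEDIATE) == IMMEDIATE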
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %4 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %4 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} { | |
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} | |
stream.yield %4 : !stream.resource<external>{%c65536} | |
} => !stream.timepoint | |
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536} | |
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %3 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%results, %result_timepoint = stream.async.execute on(#hal.device.affinity<@__device_0>) with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536} {
%4 = stream.async.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg2[%c0 to %c65536 for %c65536], %arg3[%c0 to %c65536 for %c65536]) : (!stream.resource<external>{%c65536}, !stream.resource<external>{%c65536}) -> !stream.resource<external>{%c65536}
stream.yield %4 : !stream.resource<external>{%c65536}
} => !stream.timepoint
%2 = stream.timepoint.await %result_timepoint => %results : !stream.resource<external>{%c65536}
%3 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %2 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %3 : !hal.buffer_view
}
}
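Every stream.resource in the function above is sized by the single constant %c65536, and that is not a coincidence: at these shapes the two bf16 inputs and the f32 output each occupy exactly 64 KiB. A quick standalone Python check (plain arithmetic, not IREE code; the 2-byte bf16 and 4-byte f32 widths are the standard ones, not read from the dump):

def nbytes(shape, bytes_per_elem):
    n = 1
    for dim in shape:
        n *= dim
    return n * bytes_per_elem

print(nbytes((1, 128, 256), 2))  # input0, bf16 -> 65536
print(nbytes((1, 256, 128), 2))  # input1, bf16 -> 65536
print(nbytes((1, 128, 128), 4))  # output, f32  -> 65536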
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
// (module unchanged from the previous dump; body elided)
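For reference, the dispatch body shown above (linalg.fill of 0.0 followed by linalg.batch_matmul, bf16 operands accumulated into f32) computes the following, sketched here in Python with numpy. This is a numerical reference only, under the assumption that float32 may stand in for bf16 (numpy has no native bf16 type):

import numpy as np

def batch_matmul_dispatch(lhs, rhs):
    # lhs: (1, 128, 256), rhs: (1, 256, 128)
    acc = np.zeros((1, 128, 128), dtype=np.float32)  # linalg.fill
    acc += np.matmul(lhs.astype(np.float32),
                     rhs.astype(np.float32))         # linalg.batch_matmul
    return acc

out = batch_matmul_dispatch(np.ones((1, 128, 256), np.float32),
                            np.ones((1, 256, 128), np.float32))
assert out.shape == (1, 128, 128) and out[0, 0, 0] == 256.0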
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
// (module unchanged; body elided)
// -----// IR Dump Before VerifyLoweringToAsyncPass (iree-stream-verify-lowering-to-async) //----- //
// (module unchanged; body elided)
// -----// IR Dump Before ScheduleAllocationPass (iree-stream-schedule-allocation) //----- //
// (module unchanged; body elided)
// -----// IR Dump Before PackConstantsPass (iree-stream-pack-constants) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%c0_0 = arith.constant 0 : index
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0_0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
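This is the first dump after allocation scheduling ran: the stream.async.execute region has been rewritten into an explicit stream.resource.alloca for the output (yielding %result and %result_timepoint) plus a stream.cmd.execute that awaits the allocation timepoint, and the host finally awaits the execution timepoint before exporting. A minimal Python sketch of that timepoint chaining, assuming nothing about IREE's actual runtime (Timepoint, alloca, and cmd_execute are hypothetical stand-ins):

import threading

class Timepoint:
    def __init__(self):
        self._event = threading.Event()
    def signal(self):
        self._event.set()
    def wait(self):
        self._event.wait()

def alloca(nbytes):
    # stands in for stream.resource.alloca: returns (buffer, ready-timepoint)
    tp = Timepoint()
    buf = bytearray(nbytes)  # "uninitialized" in the real runtime
    tp.signal()              # host-side allocation is immediately ready here
    return buf, tp

def cmd_execute(await_tp, body):
    # stands in for stream.cmd.execute await(...) => !stream.timepoint
    done = Timepoint()
    def run():
        await_tp.wait()  # await(%result_timepoint)
        body()           # stream.cmd.dispatch would run here
        done.signal()
    threading.Thread(target=run).start()
    return done

result, result_tp = alloca(65536)               # %result, %result_timepoint
exec_tp = cmd_execute(result_tp, lambda: None)  # dispatch would fill result
exec_tp.wait()                                  # stream.timepoint.await %2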
// -----// IR Dump Before LayoutSlicesPass (iree-stream-layout-slices) //----- //
// (function unchanged from the previous dump; body elided)
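Before any of this executes, the two hal.buffer_view.assert ops guard the function's ABI: a caller whose buffer metadata does not match the compiled shape, element type, and encoding is rejected up front. A toy Python equivalent of that check (assert_buffer_view and the dict layout are illustrative, not IREE's runtime API):

def assert_buffer_view(view, name, shape, dtype):
    # reject inputs whose metadata does not match the compiled signature
    if tuple(view["shape"]) != tuple(shape) or view["dtype"] != dtype:
        raise ValueError(f"{name}: expected {dtype} {list(shape)}, "
                         f"got {view['dtype']} {view['shape']}")

assert_buffer_view({"shape": [1, 128, 256], "dtype": "bf16"},
                   "input0", (1, 128, 256), "bf16")
assert_buffer_view({"shape": [1, 256, 128], "dtype": "bf16"},
                   "input1", (1, 256, 128), "bf16")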
// -----// IR Dump Before PropagateSubranges (iree-util-propagate-subranges) //----- //
// (full module dump: the unchanged executable plus the function shown above; body elided)
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
// (function unchanged, still carrying the duplicate %c0_0 constant; body elided)
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
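Relative to the earlier dumps, the duplicate zero constant %c0_0 is gone: the canonicalizer that ran just before this dump dedupes identical arith.constant ops and rewrites uses of the duplicate to the surviving value, so wo %arg4[%c0_0 for %c65536] became wo %arg4[%c0 for %c65536]. A toy Python illustration of that constant deduplication (not compiler code):

def cse_constants(ops):
    # ops: list of (result_name, op_name, value); duplicates are folded
    seen, kept, replaced = {}, [], {}
    for name, op, value in ops:
        key = (op, value)
        if key in seen:
            replaced[name] = seen[key]   # e.g. %c0_0 -> %c0
        else:
            seen[key] = name
            kept.append((name, op, value))
    return kept, replaced

kept, replaced = cse_constants([("%c0", "arith.constant", 0),
                                ("%c0_0", "arith.constant", 0)])
print(kept)      # [('%c0', 'arith.constant', 0)]
print(replaced)  # {'%c0_0': '%c0'}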
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before VerifyLoweringToCmdPass (iree-stream-verify-lowering-to-cmd) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before SCFToControlFlow (convert-scf-to-cf) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before FixedPointIterator (iree-util-fixed-point-iterator) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before CSE (cse) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
  %c65536 = arith.constant 65536 : index
  %c0 = arith.constant 0 : index
  %c256 = arith.constant 256 : index
  %c128 = arith.constant 128 : index
  %c1 = arith.constant 1 : index
  %element_type_bf16 = hal.element_type<bf16> : i32
  %dense_row_major = hal.encoding_type<dense_row_major> : i32
  hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
  %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
  hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
  %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
  %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
  %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
    stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
      ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
      ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
      wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
    }
  } => !stream.timepoint
  %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
  %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
  util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} {
  util.global private @__device_0 = #device_target_xrt
  stream.executable private @batch_matmul_dispatch_0 {
    stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      stream.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) {
        %cst = arith.constant 0.000000e+00 : f32
        %c0 = arith.constant 0 : index
        %0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
        %1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
        %2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        %3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
        %4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
        %5 = tensor.empty() : tensor<1x128x128xf32>
        %6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        %7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
        flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
        return
      }
    }
  }
  util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
    %c65536 = arith.constant 65536 : index
    %c0 = arith.constant 0 : index
    %c256 = arith.constant 256 : index
    %c128 = arith.constant 128 : index
    %c1 = arith.constant 1 : index
    %element_type_bf16 = hal.element_type<bf16> : i32
    %dense_row_major = hal.encoding_type<dense_row_major> : i32
    hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
    %0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
    hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
    %1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
    %result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
    %2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
      stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
        ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
        ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
        wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
      }
    } => !stream.timepoint
    %3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
    %4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
    util.return %4 : !hal.buffer_view
  }
}
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
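// Note: iree-util-ipo appears to be a no-op at this point in the pipeline — the dump below (before iree-stream-elide-timepoints) is identical to the module above.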
// -----// IR Dump Before ElideTimepointsPass (iree-stream-elide-timepoints) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {iree.fixedpoint.iteration = 0 : index, stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
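// Note: the only visible change after iree-stream-elide-timepoints is that the module-level iree.fixedpoint.iteration attribute is gone in the next dump, suggesting the fixed-point pass driver converged on this iteration; the stream ops themselves are unchanged.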
// -----// IR Dump Before FuseDispatchBindingsPass (iree-stream-fuse-dispatch-bindings) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
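// Note: iree-stream-fuse-dispatch-bindings runs next. In the dump below, the dispatch entry point has gained three index operands (%arg3..%arg5) that feed the stream.binding.subspan offsets, and the call site passes %c0 for each of them.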
// -----// IR Dump Before AnnotateDispatchArgumentsPass (iree-stream-annotate-dispatch-arguments) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding, %arg1: !stream.binding, %arg2: !stream.binding, %arg3: index, %arg4: index, %arg5: index) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0, %c0, %c0 : index, index, index) { | |
ro %arg2[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0_0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
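// Note: iree-stream-annotate-dispatch-arguments has annotated the bindings with stream.alignment = 64 and the offset operands with stream.values = [0 : index] in the dump below, recording that every offset is statically zero.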
// -----// IR Dump Before PackDispatchOperandsPass (iree-stream-pack-dispatch-operands) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: index {stream.values = [0 : index]}, %arg4: index {stream.values = [0 : index]}, %arg5: index {stream.values = [0 : index]}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%arg3] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%arg4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%arg5] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0, %c0, %c0 : index, index, index) { | |
ro %arg2[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0_0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
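// Note: iree-stream-pack-dispatch-operands splits each index operand into a lo/hi pair of i32 words at the call site, i.e. index = (zext(hi) << 32) | zext(lo). The function-scoped dump below shows the caller side: six i32 zeros, plus the dead i64/index constants the pass leaves behind for the canonicalizer.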
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%c0_0 = arith.constant 0 : index | |
%c0_i64 = arith.constant 0 : i64 | |
%c0_i32 = arith.constant 0 : i32 | |
%c32_i64 = arith.constant 32 : i64 | |
%c0_i64_1 = arith.constant 0 : i64 | |
%c0_i32_2 = arith.constant 0 : i32 | |
%c0_i64_3 = arith.constant 0 : i64 | |
%c0_i32_4 = arith.constant 0 : i32 | |
%c32_i64_5 = arith.constant 32 : i64 | |
%c0_i64_6 = arith.constant 0 : i64 | |
%c0_i32_7 = arith.constant 0 : i32 | |
%c0_i64_8 = arith.constant 0 : i64 | |
%c0_i32_9 = arith.constant 0 : i32 | |
%c32_i64_10 = arith.constant 32 : i64 | |
%c0_i64_11 = arith.constant 0 : i64 | |
%c0_i32_12 = arith.constant 0 : i32 | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32_2, %c0_i32_4, %c0_i32_7, %c0_i32_9, %c0_i32_12 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0_0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0_0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
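// Note: canonicalization has folded the duplicate constants — the dump below keeps a single %c0_i32 for all six dispatch operands and drops the dead i64/index constants.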
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
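// Note: cse finds nothing further to eliminate here; the next dump is identical.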
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
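// Note: iree-util-simplify-global-accesses leaves the function unchanged. The next dump is module-scoped again, so the packed callee is visible: it reassembles each index offset from its two i32 halves via arith.extui/arith.shli/arith.ori followed by arith.index_castui.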
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg4 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %arg3 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%c32_i64_0 = arith.constant 32 : i64 | |
%5 = arith.extui %arg6 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64_0 : i64 | |
%7 = arith.extui %arg5 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%c32_i64_1 = arith.constant 32 : i64 | |
%10 = arith.extui %arg8 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64_1 : i64 | |
%12 = arith.extui %arg7 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
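// Note: iree-util-apply-patterns has cleaned up the callee in the dump below — the three duplicate %c32_i64 constants are merged into one, %cst is hoisted to the top of the function, and the now-unused %c0 is gone.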
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg4 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %arg3 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg6 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64 : i64 | |
%7 = arith.extui %arg5 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg8 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64 : i64 | |
%12 = arith.extui %arg7 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
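// Note: iree-util-fold-globals makes no changes; the next dump is identical.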
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg4 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %arg3 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg6 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64 : i64 | |
%7 = arith.extui %arg5 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg8 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64 : i64 | |
%12 = arith.extui %arg7 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
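// Note: iree-util-fuse-globals is likewise a no-op here (only @__device_0 exists, so there is nothing to fuse); the next dump is identical.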
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg4 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %arg3 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg6 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64 : i64 | |
%7 = arith.extui %arg5 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg8 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64 : i64 | |
%12 = arith.extui %arg7 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
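// Note: the second iree-util-ipo run also changes nothing; the next dump is identical.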
// -----// IR Dump Before FoldUniformOperandsPass (iree-stream-fold-uniform-operands) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32, %arg7: i32, %arg8: i32) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %arg4 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %arg3 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %arg6 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64 : i64 | |
%7 = arith.extui %arg5 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %arg8 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64 : i64 | |
%12 = arith.extui %arg7 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32, %c0_i32 : i32, i32, i32, i32, i32, i32) { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
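// Note: iree-stream-fold-uniform-operands sees that all six i32 operands are the same constant zero at the only dispatch site and folds them away — the dispatch in the function-scoped dump below carries no operands (the matching callee arguments are presumably dropped as well, though the callee is not shown in this dump).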
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c0_i32 = arith.constant 0 : i32 | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
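// Note: in the dump above the dispatch no longer carries the six i32 push
// constants, so %c0_i32 is dead; the canonicalizer drops it, as the next
// dump (before CSE) shows.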
// -----// IR Dump Before CSE (cse) //----- // | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- // | |
// (function unchanged from the previous dump)
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
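// Note: flow.dispatch.workgroup_count_from_slice is a placeholder; the
// actual workgroup count is filled in later in lowering, once the target
// backend decides how to tile this (fully static) dispatch.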
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%c0_i32 = arith.constant 0 : i32 | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c32_i64 = arith.constant 32 : i64 | |
%0 = arith.extui %c0_i32 : i32 to i64 | |
%1 = arith.shli %0, %c32_i64 : i64 | |
%2 = arith.extui %c0_i32 : i32 to i64 | |
%3 = arith.ori %2, %1 : i64 | |
%4 = arith.index_castui %3 {stream.values = [0 : index]} : i64 to index | |
%5 = arith.extui %c0_i32 : i32 to i64 | |
%6 = arith.shli %5, %c32_i64 : i64 | |
%7 = arith.extui %c0_i32 : i32 to i64 | |
%8 = arith.ori %7, %6 : i64 | |
%9 = arith.index_castui %8 {stream.values = [0 : index]} : i64 to index | |
%10 = arith.extui %c0_i32 : i32 to i64 | |
%11 = arith.shli %10, %c32_i64 : i64 | |
%12 = arith.extui %c0_i32 : i32 to i64 | |
%13 = arith.ori %12, %11 : i64 | |
%14 = arith.index_castui %13 {stream.values = [0 : index]} : i64 to index | |
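// Note: each binding offset was passed as two i32 push constants (see the
// six i32 operands on stream.cmd.dispatch in earlier dumps); each chain
// above rebuilds the i64 as (hi << 32) | lo and casts it to index. The
// {stream.values = [0 : index]} annotation records that the value is
// statically known to be zero.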
%15 = stream.binding.subspan %arg0[%4] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%16 = stream.binding.subspan %arg1[%9] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%17 = stream.binding.subspan %arg2[%14] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%18 = flow.dispatch.tensor.load %15, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%19 = flow.dispatch.tensor.load %16, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%20 = tensor.empty() : tensor<1x128x128xf32> | |
%21 = linalg.fill ins(%cst : f32) outs(%20 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%22 = linalg.batch_matmul ins(%18, %19 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%21 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %22, %17, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
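// Note: iree-util-apply-patterns folds the push-constant reassembly above
// away; in the next dump the dispatch function simply indexes every
// binding at %c0.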
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
stream.executable private @batch_matmul_dispatch_0 { | |
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) { | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
stream.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
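// Note: the passes below (FuseGlobals, IPO, SymbolDCE, device
// assignment/materialization/resolution, VerifyDevices, CSE) make no
// further changes at this level; each of their dumps was byte-identical
// to the module above.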
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before IPO (iree-util-ipo) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before SymbolDCE (symbol-dce) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before AssignLegacyTargetDevicesPass (iree-hal-assign-legacy-target-devices) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before MaterializeTargetDevicesPass (iree-hal-materialize-target-devices) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before ResolveDevicePromisesPass (iree-hal-resolve-device-promises) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before ResolveDeviceAliasesPass (iree-hal-resolve-device-aliases) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before VerifyDevicesPass (iree-hal-verify-devices) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before CSE (cse) //----- // | |
// (module unchanged from the previous dump)
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before CSE (cse) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before SimplifyGlobalAccesses (iree-util-simplify-global-accesses) //----- //
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
// -----// IR Dump Before ApplyPatterns (iree-util-apply-patterns) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FoldGlobals (iree-util-fold-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before FuseGlobals (iree-util-fuse-globals) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before VerifyDevicesPass (iree-hal-verify-devices) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before MaterializeInterfacesPass (iree-hal-materialize-interfaces) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
stream.executable private @batch_matmul_dispatch_0 {
stream.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 workgroups() -> (index, index, index) {
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
stream.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32(%arg0: !stream.binding {stream.alignment = 64 : index}, %arg1: !stream.binding {stream.alignment = 64 : index}, %arg2: !stream.binding {stream.alignment = 64 : index}) {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = stream.binding.subspan %arg1[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = stream.binding.subspan %arg2[%c0] : !stream.binding -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before PruneExecutablesPass (iree-hal-prune-executables) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
hal.executable private @batch_matmul_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) {
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@amdaie_xclbin_fb::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before DumpExecutableSourcesPass (iree-hal-dump-executable-sources) //----- //
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} {
util.global private @__device_0 = #device_target_xrt
hal.executable private @batch_matmul_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) {
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#pipeline_layout) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
}
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} {
%c65536 = arith.constant 65536 : index
%c0 = arith.constant 0 : index
%c256 = arith.constant 256 : index
%c128 = arith.constant 128 : index
%c1 = arith.constant 1 : index
%element_type_bf16 = hal.element_type<bf16> : i32
%dense_row_major = hal.encoding_type<dense_row_major> : i32
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major)
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536}
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major)
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536}
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) {
stream.cmd.dispatch @batch_matmul_dispatch_0::@amdaie_xclbin_fb::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 {
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536},
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536},
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536}
}
} => !stream.timepoint
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536}
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view
util.return %4 : !hal.buffer_view
}
}
// -----// IR Dump Before ConfigureExecutablesPass (iree-hal-configure-executables) //----- //
hal.executable private @batch_matmul_dispatch_0 {
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>) {
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
}
// -----// IR Dump Before ConfigureTargetExecutableVariantsPass (iree-hal-configure-target-executable-variants) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>) {
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() {
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>>
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>>
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16>
%5 = tensor.empty() : tensor<1x128x128xf32>
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>>
return
}
}
}
// -----// IR Dump Before DumpExecutableSourcesPass (iree-hal-dump-executable-sources) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
hal.executable private @batch_matmul_dispatch_0 { | |
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) { | |
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#pipeline_layout) { | |
^bb0(%arg0: !hal.device): | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@amdaie_xclbin_fb::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
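// Note on the %c65536 sizes above: with these shapes, all three buffers happen to be exactly
// 64 KiB, which is why a single constant serves every stream.resource size and dispatch range.
// A quick arithmetic check (plain Python; byte widths taken from the element types in the IR):
assert 1 * 128 * 256 * 2 == 65536  # input0: tensor<1x128x256xbf16>, bf16 = 2 bytes
assert 1 * 256 * 128 * 2 == 65536  # input1: tensor<1x256x128xbf16>, bf16 = 2 bytes
assert 1 * 128 * 128 * 4 == 65536  # output0: tensor<1x128x128xf32>, f32 = 4 bytes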
// -----// IR Dump Before DumpExecutableBenchmarksPass (iree-hal-dump-executable-benchmarks) //----- // | |
#executable_target_amdaie_xclbin_fb = #hal.executable.target<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}> | |
#pipeline_layout = #hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect> | |
#device_target_xrt = #hal.device.target<"xrt", [#executable_target_amdaie_xclbin_fb]> : !hal.device | |
module attributes {stream.affinity.default = #hal.device.affinity<@__device_0>} { | |
util.global private @__device_0 = #device_target_xrt | |
hal.executable private @batch_matmul_dispatch_0 { | |
hal.executable.variant public @amdaie_xclbin_fb target(#executable_target_amdaie_xclbin_fb) { | |
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#pipeline_layout) { | |
^bb0(%arg0: !hal.device): | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
} | |
util.func public @batch_matmul(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub, iree.reflection = {iree.abi.declaration = "sync func @batch_matmul(%input0: tensor<1x128x256xbf16>, %input1: tensor<1x256x128xbf16>) -> (%output0: tensor<1x128x128xf32>)"}} { | |
%c65536 = arith.constant 65536 : index | |
%c0 = arith.constant 0 : index | |
%c256 = arith.constant 256 : index | |
%c128 = arith.constant 128 : index | |
%c1 = arith.constant 1 : index | |
%element_type_bf16 = hal.element_type<bf16> : i32 | |
%dense_row_major = hal.encoding_type<dense_row_major> : i32 | |
hal.buffer_view.assert<%arg0 : !hal.buffer_view> message("input0") shape([%c1, %c128, %c256]) type(%element_type_bf16) encoding(%dense_row_major) | |
%0 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg0 : !hal.buffer_view -> tensor<1x128x256xbf16> in !stream.resource<external>{%c65536} | |
hal.buffer_view.assert<%arg1 : !hal.buffer_view> message("input1") shape([%c1, %c256, %c128]) type(%element_type_bf16) encoding(%dense_row_major) | |
%1 = stream.tensor.import on(#hal.device.affinity<@__device_0>) %arg1 : !hal.buffer_view -> tensor<1x256x128xbf16> in !stream.resource<external>{%c65536} | |
%result, %result_timepoint = stream.resource.alloca uninitialized on(#hal.device.affinity<@__device_0>) : !stream.resource<external>{%c65536} => !stream.timepoint | |
%2 = stream.cmd.execute on(#hal.device.affinity<@__device_0>) await(%result_timepoint) => with(%0 as %arg2: !stream.resource<external>{%c65536}, %1 as %arg3: !stream.resource<external>{%c65536}, %result as %arg4: !stream.resource<external>{%c65536}) { | |
stream.cmd.dispatch @batch_matmul_dispatch_0::@amdaie_xclbin_fb::@batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 { | |
ro %arg2[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
ro %arg3[%c0 for %c65536] : !stream.resource<external>{%c65536}, | |
wo %arg4[%c0 for %c65536] : !stream.resource<external>{%c65536} | |
} | |
} => !stream.timepoint | |
%3 = stream.timepoint.await %2 => %result : !stream.resource<external>{%c65536} | |
%4 = stream.tensor.export on(#hal.device.affinity<@__device_0>) %3 : tensor<1x128x128xf32> in !stream.resource<external>{%c65536} -> !hal.buffer_view | |
util.return %4 : !hal.buffer_view | |
} | |
} | |
// -----// IR Dump Before TranslateExecutablesPass (iree-hal-translate-executables) //----- // | |
hal.executable private @batch_matmul_dispatch_0 { | |
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>) { | |
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) { | |
^bb0(%arg0: !hal.device): | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
} | |
// -----// IR Dump Before TranslateTargetExecutableVariantsPass (iree-hal-translate-target-executable-variants) //----- // | |
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_device = "npu1_4col", ukernels = "none"}>) { | |
hal.executable.export public @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32 ordinal(0) layout(#hal.pipeline.layout<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) { | |
^bb0(%arg0: !hal.device): | |
%x, %y, %z = flow.dispatch.workgroup_count_from_slice | |
hal.return %x, %y, %z : index, index, index | |
} | |
builtin.module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
} | |
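// Functionally, the dispatch body above is a zero-initialized bf16 x bf16 -> f32 batch matmul.
// A NumPy reference for the same computation (a sketch only; NumPy has no bf16, so float32
// stands in for the widened operands):
import numpy as np

def batch_matmul_ref(lhs, rhs):
    # lhs: (1, 128, 256), rhs: (1, 256, 128); zero-init then accumulate in f32,
    # mirroring the linalg.fill + linalg.batch_matmul pair above.
    acc = np.zeros((1, 128, 128), dtype=np.float32)
    acc += np.einsum('bmk,bkn->bmn', lhs.astype(np.float32), rhs.astype(np.float32))
    return acc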
// -----// IR Dump Before TypePropagationPass (iree-codegen-type-propagation) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before BubbleUpOrdinalOpsPass (iree-codegen-bubble-up-ordinal-ops) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before BufferizeCopyOnlyDispatchesPass (iree-codegen-bufferize-copy-only-dispatches) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before DecomposeSoftmaxPass (iree-codegen-decompose-softmax) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before MaterializeUserConfigsPass (iree-codegen-materialize-user-configs) //----- // | |
module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
// -----// IR Dump Before AMDAIELoweringStrategy (iree-amdaie-lowering-strategy) //----- // | |
module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
// -----// IR Dump Before LowerExecutableUsingTransformDialectPass (iree-codegen-lower-executable-using-transform-dialect) //----- // | |
module { | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
} | |
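// The first entry of tile_sizes in the lowering_config above, [1, 64, 64], tiles the
// 1x128x128 result along its B, M, N dimensions; the scf.forall that AMDAIETileAndFuse
// produces below iterates exactly this grid. A sketch of the grid arithmetic (assuming the
// usual ceil-div tile count; every dimension here divides evenly):
import math

out_shape  = (1, 128, 128)   # B, M, N of the batch_matmul result
tile_sizes = (1, 64, 64)     # first level of tile_sizes in the lowering_config
grid = [math.ceil(d / t) for d, t in zip(out_shape, tile_sizes)]
assert grid == [1, 2, 2]     # four independent 1x64x64 output tiles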
// -----// IR Dump Before AMDAIELowerExecutableTarget (iree-amdaie-lower-executable-target) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
flow.dispatch.tensor.store %7, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = linalg.fill ins(%cst : f32) outs(%5 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%7 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%3, %4 : tensor<1x128x256xbf16>, tensor<1x256x128xbf16>) outs(%6 : tensor<1x128x128xf32>) -> tensor<1x128x128xf32> | |
%8 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%9 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%extracted_slice_2 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%10 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<1x64x256xbf16>, tensor<1x256x64xbf16>) outs(%9 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %10 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %8, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
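// After AMDAIETileAndFuse, the scf.forall above walks the (1, 64, 64) tile grid: each
// iteration fills its own 1x64x64 output slice and multiplies a 1x64x256 LHS slice with a
// 1x256x64 RHS slice. (The untiled %7 batch_matmul is now dead; the cleanup and canonicalize
// passes that follow delete it.) A sequential NumPy sketch of the same iteration space:
import numpy as np

def tiled_batch_matmul(lhs, rhs):
    # lhs: (1, 128, 256), rhs: (1, 256, 128), float-like arrays.
    out = np.empty((1, 128, 128), dtype=np.float32)
    for b in range(0, 1, 1):            # scf.forall dim 0: step 1
        for m in range(0, 128, 64):     # scf.forall dim 1: step 64
            for n in range(0, 128, 64): # scf.forall dim 2: step 64
                a = lhs[b:b+1, m:m+64, :].astype(np.float32)  # tensor.extract_slice of LHS
                w = rhs[b:b+1, :, n:n+64].astype(np.float32)  # tensor.extract_slice of RHS
                # linalg.fill + tile-local batch_matmul on the slice pair:
                out[b:b+1, m:m+64, n:n+64] = np.einsum('bmk,bkn->bmn', a, w)
    return out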
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%8 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<1x64x256xbf16>, tensor<1x256x64xbf16>) outs(%7 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %8 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before CSE (cse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%8 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<1x64x256xbf16>, tensor<1x256x64xbf16>) outs(%7 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %8 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%8 = linalg.batch_matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<1x64x256xbf16>, tensor<1x256x64xbf16>) outs(%7 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %8 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIEPropagateDataLayout (iree-amdaie-propagate-data-layout) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%8 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %8 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%9 = tensor.empty() : tensor<1x4x2x32x64xbf16> | |
%10 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %10 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%11 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%pack_3 = tensor.pack %7 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %11 : tensor<1x64x64xf32> -> tensor<1x2x2x32x32xf32> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%pack_3 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_4: bf16, %out: f32): | |
%13 = arith.extf %in : bf16 to f32 | |
%14 = arith.extf %in_4 : bf16 to f32 | |
%15 = arith.mulf %13, %14 : f32 | |
%16 = arith.addf %out, %15 : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %12 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %7 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
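// AMDAIEPackAndTranspose moves the tile-local matmul onto packed layouts: with
// inner_tiles = [32, 64] the 1x64x256 LHS slice becomes 1x2x4x32x64, and with
// inner_tiles = [64, 32] the 1x256x64 RHS slice becomes 1x4x2x64x32. For exactly divisible
// shapes like these, each tensor.pack amounts to a reshape plus transpose; a NumPy sketch
// (tensor.pack can also pad, which never triggers here):
import numpy as np

lhs_tile = np.zeros((1, 64, 256), dtype=np.float32)
rhs_tile = np.zeros((1, 256, 64), dtype=np.float32)

# inner_dims_pos = [1, 2], inner_tiles = [32, 64]: 1x64x256 -> 1x2x4x32x64
lhs_packed = lhs_tile.reshape(1, 2, 32, 4, 64).transpose(0, 1, 3, 2, 4)
assert lhs_packed.shape == (1, 2, 4, 32, 64)

# inner_dims_pos = [1, 2], inner_tiles = [64, 32]: 1x256x64 -> 1x4x2x64x32
rhs_packed = rhs_tile.reshape(1, 4, 64, 2, 32).transpose(0, 1, 3, 2, 4)
assert rhs_packed.shape == (1, 4, 2, 64, 32)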
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = linalg.fill ins(%cst : f32) outs(%extracted_slice_1 : tensor<1x64x64xf32>) -> tensor<1x64x64xf32> | |
%8 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %8 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%9 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %9 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%10 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%pack_3 = tensor.pack %7 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %10 : tensor<1x64x64xf32> -> tensor<1x2x2x32x32xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%pack_3 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_4: bf16, %out: f32): | |
%12 = arith.extf %in : bf16 to f32 | |
%13 = arith.extf %in_4 : bf16 to f32 | |
%14 = arith.mulf %12, %13 : f32 | |
%15 = arith.addf %out, %14 : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %11 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %7 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
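// NOTE: in the next dump (after canonicalization) the tensor.pack of the
// zero-fill has been folded away: linalg.fill now writes the packed
// 1x2x2x32x32xf32 accumulator directly, and the trailing tensor.unpack
// targets the extracted 1x64x64 output slice instead of the fill result.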
// -----// IR Dump Before CSE (cse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%9 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%10 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_3: bf16, %out: f32): | |
%12 = arith.extf %in : bf16 to f32 | |
%13 = arith.extf %in_3 : bf16 to f32 | |
%14 = arith.mulf %12, %13 : f32 | |
%15 = arith.addf %out, %14 : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %11 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
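// NOTE: CSE finds nothing to eliminate at this point; the next dump (the
// input to the first AMDAIEBufferizeToAllocation instance) is unchanged.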
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%9 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%10 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_3: bf16, %out: f32): | |
%12 = arith.extf %in : bf16 to f32 | |
%13 = arith.extf %in_3 : bf16 to f32 | |
%14 = arith.mulf %12, %13 : f32 | |
%15 = arith.addf %out, %14 : f32 | |
linalg.yield %15 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %11 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
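// NOTE: the first AMDAIEBufferizeToAllocation instance materializes the
// 1x2x2x32x32xf32 accumulator as a memref.alloc in memory space 1 (likely
// the AIE memtile level, if the usual AMDAIE convention applies), exposed
// back to the tensor world via bufferization.to_tensor ... restrict writable
// and freed with a matching memref.dealloc.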
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%9 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%11 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_3: bf16, %out: f32): | |
%13 = arith.extf %in : bf16 to f32 | |
%14 = arith.extf %in_3 : bf16 to f32 | |
%15 = arith.mulf %13, %14 : f32 | |
%16 = arith.addf %out, %15 : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %12 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
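// NOTE: the second AMDAIEBufferizeToAllocation instance makes no visible
// change here; the next dump (input to AMDAIEPackAndTranspose) is identical.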
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%9 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d3, d4, d6)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d3, d2, d6, d5)>, affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<1x2x4x32x64xbf16>, tensor<1x4x2x64x32xbf16>) outs(%11 : tensor<1x2x2x32x32xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_3: bf16, %out: f32): | |
%13 = arith.extf %in : bf16 to f32 | |
%14 = arith.extf %in_3 : bf16 to f32 | |
%15 = arith.mulf %13, %14 : f32 | |
%16 = arith.addf %out, %15 : f32 | |
linalg.yield %16 : f32 | |
} -> tensor<1x2x2x32x32xf32> | |
%unpack = tensor.unpack %12 inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
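// NOTE: AMDAIEPackAndTranspose applies the second packing level
// (packedSizes = [0, 0, 0, 0, 4, 4, 8] with outerPerm [0, 1, 2, 4, 3]):
// the operands are repacked to 1x2x4x8x8x4x8, 1x4x2x8x8x8x4 and
// 1x2x2x8x8x4x4, and the matmul generic is rewritten over ten loops
// (d0..d9), adding a third reduction dimension.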
// -----// IR Dump Before AMDAIEPropagateDataLayout (iree-amdaie-propagate-data-layout) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%9 = tensor.empty() : tensor<1x2x2x32x32xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%11 = linalg.fill ins(%cst : f32) outs(%10 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%12 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%13 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %13 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%14 = tensor.empty() : tensor<1x4x2x8x8x4x8xbf16> | |
%15 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %15 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%16 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%17 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%pack_5 = tensor.pack %11 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %17 : tensor<1x2x2x32x32xf32> -> tensor<1x2x2x8x8x4x4xf32> | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%pack_5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_7: bf16, %out: f32): | |
%19 = arith.extf %in : bf16 to f32 | |
%20 = arith.extf %in_7 : bf16 to f32 | |
%21 = arith.mulf %19, %20 : f32 | |
%22 = arith.addf %out, %21 : f32 | |
linalg.yield %22 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%unpack = tensor.unpack %18 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %11 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
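// NOTE: after AMDAIEPropagateDataLayout (next dump) the duplicated
// tensor.empty destinations left behind by pack-and-transpose are gone;
// the pack/unpack chain itself is unchanged.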
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = linalg.fill ins(%cst : f32) outs(%9 : tensor<1x2x2x32x32xf32>) -> tensor<1x2x2x32x32xf32> | |
%11 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %11 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%12 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %12 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%13 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%pack_5 = tensor.pack %10 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %13 : tensor<1x2x2x32x32xf32> -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%pack_5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_7: bf16, %out: f32): | |
%15 = arith.extf %in : bf16 to f32 | |
%16 = arith.extf %in_7 : bf16 to f32 | |
%17 = arith.mulf %15, %16 : f32 | |
%18 = arith.addf %out, %17 : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %10 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
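// NOTE: canonicalization folds the fill-then-pack pair once more: in the
// next dump linalg.fill produces the 1x2x2x8x8x4x4xf32 accumulator
// directly, and the result is unpacked in two steps (4x4 inner tiles back
// to 1x2x2x32x32, then 32x32 tiles back to the 1x64x64 output slice).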
// -----// IR Dump Before CSE (cse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%12 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%13 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_6: bf16, %out: f32): | |
%15 = arith.extf %in : bf16 to f32 | |
%16 = arith.extf %in_6 : bf16 to f32 | |
%17 = arith.mulf %15, %16 : f32 | |
%18 = arith.addf %out, %17 : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_5 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_5 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
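// NOTE: CSE is again a no-op; the next dump (input to the third
// AMDAIEBufferizeToAllocation instance) is identical.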
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%12 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%13 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_6: bf16, %out: f32): | |
%15 = arith.extf %in : bf16 to f32 | |
%16 = arith.extf %in_6 : bf16 to f32 | |
%17 = arith.mulf %15, %16 : f32 | |
%18 = arith.addf %out, %17 : f32 | |
linalg.yield %18 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_5 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_5 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
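// NOTE: the third AMDAIEBufferizeToAllocation instance gives the
// 1x2x2x8x8x4x4xf32 accumulator its own memref.alloc in memory space 2
// (likely core-local memory), so the fill and the matmul generic now
// accumulate into a buffer distinct from the space-1 staging buffer.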
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%12 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%14 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_7: bf16, %out: f32): | |
%16 = arith.extf %in : bf16 to f32 | |
%17 = arith.extf %in_7 : bf16 to f32 | |
%18 = arith.mulf %16, %17 : f32 | |
%19 = arith.addf %out, %18 : f32 | |
linalg.yield %19 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
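// NOTE: AMDAIETileAndFuse tiles the outer reduction dimension (size 4,
// per the [0, 0, 0, 1] level of the lowering_config) into an
// scf.for %c0 to %c4 loop that extracts 1x2x1x8x8x4x8 and 1x1x2x8x8x8x4
// operand slices and accumulates through iter_args; the untiled generic
// above the loop is now dead and is left for AMDAIECleanup to erase.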
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%12 = tensor.empty() : tensor<1x2x2x8x8x4x4xf32> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%14 = linalg.fill ins(%cst : f32) outs(%13 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<1x2x4x8x8x4x8xbf16>, tensor<1x4x2x8x8x8x4xbf16>) outs(%14 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_8: bf16, %out: f32): | |
%17 = arith.extf %in : bf16 to f32 | |
%18 = arith.extf %in_8 : bf16 to f32 | |
%19 = arith.mulf %17, %18 : f32 | |
%20 = arith.addf %out, %19 : f32 | |
linalg.yield %20 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%c0_6 = arith.constant 0 : index | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%16 = scf.for %arg4 = %c0_6 to %c4 step %c1 iter_args(%arg5 = %14) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_8 = tensor.extract_slice %pack_3[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_9 = tensor.extract_slice %pack_4[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%extracted_slice_10 = tensor.extract_slice %arg5[0, 0, 0, 0, 0, 0, 0] [1, 2, 2, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x2x2x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_8, %extracted_slice_9 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%extracted_slice_10 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_11: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_11 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%inserted_slice = tensor.insert_slice %17 into %arg5[0, 0, 0, 0, 0, 0, 0] [1, 2, 2, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %inserted_slice : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %16 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_7 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
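// -----// Note: numpy reference model of the packed matmul generic //----- //
The 10-D linalg.generic above reduces over d3, d6, and d9, and its body widens both bf16 operands to f32 before the multiply-accumulate (extf/mulf/addf). A minimal numpy sketch of the same contraction, assuming f32 stands in for bf16 (numpy has no bfloat16) and using illustrative axis names that are not part of the dump:

import numpy as np

# d0..d9 renamed b, m, n, k, p, q, r, s, t, u; k, r, u are the reductions.
# A : (b, m, k, r, p, s, u) = 1x2x4x8x8x4x8   (%pack_3)
# B : (b, k, n, q, r, u, t) = 1x4x2x8x8x8x4   (%pack_4)
# C : (b, m, n, q, p, s, t) = 1x2x2x8x8x4x4
A = np.random.rand(1, 2, 4, 8, 8, 4, 8).astype(np.float32)
B = np.random.rand(1, 4, 2, 8, 8, 8, 4).astype(np.float32)
C = np.einsum('bmkrpsu,bknqrut->bmnqpst', A, B)
assert C.shape == (1, 2, 2, 8, 8, 4, 4)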
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_7 = tensor.extract_slice %pack_3[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_8 = tensor.extract_slice %pack_4[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_9: bf16, %out: f32): | |
%16 = arith.extf %in : bf16 to f32 | |
%17 = arith.extf %in_9 : bf16 to f32 | |
%18 = arith.mulf %16, %17 : f32 | |
%19 = arith.addf %out, %18 : f32 | |
linalg.yield %19 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %15 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
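// -----// Note: shape of the remaining K reduction //----- //
Relative to the previous dump, the dead pre-loop linalg.generic, its unused tensor.empty accumulator, and the identity extract_slice/insert_slice pair around the loop-carried value are gone; what remains is a single scf.for that accumulates four K tiles into the linalg.fill result. A sketch under the same f32-for-bf16 assumption as above:

import numpy as np

A_pack = np.random.rand(1, 2, 4, 8, 8, 4, 8).astype(np.float32)  # %pack_3
B_pack = np.random.rand(1, 4, 2, 8, 8, 8, 4).astype(np.float32)  # %pack_4
acc = np.zeros((1, 2, 2, 8, 8, 4, 4), dtype=np.float32)          # linalg.fill with 0.0
for k in range(4):                                               # scf.for %arg4 = 0 to 4
    a = A_pack[:, :, k:k + 1]                                    # 1x2x1x8x8x4x8 slice
    b = B_pack[:, k:k + 1]                                       # 1x1x2x8x8x8x4 slice
    acc = acc + np.einsum('bmkrpsu,bknqrut->bmnqpst', a, b)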
// -----// IR Dump Before CSE (cse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_7 = tensor.extract_slice %pack_3[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_8 = tensor.extract_slice %pack_4[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_9: bf16, %out: f32): | |
%16 = arith.extf %in : bf16 to f32 | |
%17 = arith.extf %in_9 : bf16 to f32 | |
%18 = arith.mulf %16, %17 : f32 | |
%19 = arith.addf %out, %18 : f32 | |
linalg.yield %19 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %15 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
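// -----// Note: what tensor.pack does here //----- //
CSE leaves this dump unchanged. For reference, the pack of the 1x64x256 LHS slice with inner_dims_pos = [1, 2] and inner_tiles = [32, 64] is a tile-then-transpose; a minimal numpy sketch (again with f32 in place of bf16):

import numpy as np

src = np.random.rand(1, 64, 256).astype(np.float32)  # %extracted_slice
tiled = src.reshape(1, 2, 32, 4, 64)                 # split 64 -> 2x32 and 256 -> 4x64
packed = tiled.transpose(0, 1, 3, 2, 4)              # tile counts outer, tiles inner
assert packed.shape == (1, 2, 4, 32, 64)             # matches %pack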
// -----// IR Dump Before AMDAIEFusePackIntoLoop (iree-amdaie-fuse-pack-into-loop) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_7 = tensor.extract_slice %pack_3[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_8 = tensor.extract_slice %pack_4[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_9: bf16, %out: f32): | |
%16 = arith.extf %in : bf16 to f32 | |
%17 = arith.extf %in_9 : bf16 to f32 | |
%18 = arith.mulf %16, %17 : f32 | |
%19 = arith.addf %out, %18 : f32 | |
linalg.yield %19 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %15 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
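// -----// Note: the two-step unpack back to the 64x64 block //----- //
The two tensor.unpack ops at the tail of the forall invert the packing in two hops: the 1x2x2x8x8x4x4 micro-tiles go back to 1x2x2x32x32 (undoing outer_dims_perm = [0, 1, 2, 4, 3]), and that goes back to the 1x64x64 block written into %arg3. A numpy sketch of both inversions:

import numpy as np

acc = np.random.rand(1, 2, 2, 8, 8, 4, 4).astype(np.float32)       # loop result %14
# Undo outer_dims_perm = [0, 1, 2, 4, 3] with inner_tiles = [4, 4]:
step1 = acc.transpose(0, 1, 2, 4, 5, 3, 6).reshape(1, 2, 2, 32, 32)
# Undo inner_dims_pos = [1, 2] with inner_tiles = [32, 32]:
out = step1.transpose(0, 1, 3, 2, 4).reshape(1, 64, 64)            # %unpack_6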
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%pack = tensor.pack %extracted_slice inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %7 : tensor<1x64x256xbf16> -> tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %8 : tensor<1x256x64xbf16> -> tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x4x32x64xbf16> -> tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x4x2x64x32xbf16> -> tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%15 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0, %15] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_8 = tensor.extract_slice %7[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%pack_9 = tensor.pack %extracted_slice_7 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %extracted_slice_8 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %pack[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%extracted_slice_11 = tensor.extract_slice %10[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%pack_12 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_11 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_13 = tensor.extract_slice %pack_3[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%16 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_14 = tensor.extract_slice %extracted_slice_0[0, %16, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_15 = tensor.extract_slice %8[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%pack_16 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %extracted_slice_15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%extracted_slice_17 = tensor.extract_slice %pack_2[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%extracted_slice_18 = tensor.extract_slice %11[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%pack_19 = tensor.pack %pack_16 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_18 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%extracted_slice_20 = tensor.extract_slice %pack_4[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_19 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_21: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_21 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %17 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_5 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_6 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
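// -----// Note: pack slices sunk into the K loop //----- //
This is the result of AMDAIEFusePackIntoLoop: instead of packing the whole 1x64x256 operand up front, each iteration repacks only its own 64-wide K strip, with the strip offset given by affine_map<(d0) -> (d0 * 64)>. A sketch for the LHS (f32 in place of bf16):

import numpy as np

A_l2 = np.random.rand(1, 64, 256).astype(np.float32)  # %extracted_slice
for k in range(4):
    off = k * 64                                      # affine.apply (d0 * 64)
    a_strip = A_l2[:, :, off:off + 64]                # 1x64x64 K strip
    a_pack = a_strip.reshape(1, 2, 32, 1, 64).transpose(0, 1, 3, 2, 4)
    assert a_pack.shape == (1, 2, 1, 32, 64)          # matches %pack_9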
// -----// IR Dump Before CSE (cse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%15 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %15] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_5 = tensor.extract_slice %7[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %extracted_slice_5 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %10[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%pack_7 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_6 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%16 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice_0[0, %16, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_9 = tensor.extract_slice %8[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %extracted_slice_9 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%extracted_slice_11 = tensor.extract_slice %11[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%pack_12 = tensor.pack %pack_10 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_7, %pack_12 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_13: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_13 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %17 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
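// -----// Note: duplicate affine.apply about to be merged //----- //
%15 and %16 above compute the same d0 * 64; the CSE round that follows (next dump) merges them, so both operands' K strips share one offset computation. Roughly:

import numpy as np

A_l2 = np.random.rand(1, 64, 256).astype(np.float32)  # %extracted_slice
B_l2 = np.random.rand(1, 256, 64).astype(np.float32)  # %extracted_slice_0
for k in range(4):
    off = k * 64                                      # single affine.apply after CSE
    a_strip = A_l2[:, :, off:off + 64]                # LHS K strip
    b_strip = B_l2[:, off:off + 64, :]                # RHS K strip reuses the offset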
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%15 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %15] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_5 = tensor.extract_slice %7[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %extracted_slice_5 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %10[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%pack_7 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_6 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice_0[0, %15, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_9 = tensor.extract_slice %8[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %extracted_slice_9 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%extracted_slice_11 = tensor.extract_slice %11[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%pack_12 = tensor.pack %pack_10 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_7, %pack_12 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_13: bf16, %out: f32): | |
%17 = arith.extf %in : bf16 to f32 | |
%18 = arith.extf %in_13 : bf16 to f32 | |
%19 = arith.mulf %17, %18 : f32 | |
%20 = arith.addf %out, %19 : f32 | |
linalg.yield %20 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
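// -----// Note: buffer footprint before bufferize-to-allocation //----- //
At this point only the two accumulator buffers are explicit allocations. Assuming memory space 1 is the shared memtile level and space 2 the core-local level (an assumption; the dump only shows the integer space annotations), their sizes work out to 16 KiB each:

from math import prod

bufs = {
    "alloc   1x2x2x32x32xf32,  space 1": ((1, 2, 2, 32, 32), 4),
    "alloc_2 1x2x2x8x8x4x4xf32, space 2": ((1, 2, 2, 8, 8, 4, 4), 4),
}
for name, (shape, elem_bytes) in bufs.items():
    print(name, prod(shape) * elem_bytes, "bytes")  # 16384 bytes each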
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%15 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %15] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_5 = tensor.extract_slice %7[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%alloc_6 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%16 = bufferization.to_tensor %alloc_6 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %16 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_7 = tensor.extract_slice %10[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%pack_8 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice_0[0, %15, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %8[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%alloc_11 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %17 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%extracted_slice_13 = tensor.extract_slice %11[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%pack_14 = tensor.pack %pack_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_8, %pack_14 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_15: bf16, %out: f32): | |
%19 = arith.extf %in : bf16 to f32 | |
%20 = arith.extf %in_15 : bf16 to f32 | |
%21 = arith.mulf %19, %20 : f32 | |
%22 = arith.addf %out, %21 : f32 | |
linalg.yield %22 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
memref.dealloc %alloc_6 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_11 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %18 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
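// -----// Note: per-iteration staging buffers after bufferize-to-allocation //----- //
AMDAIEBufferizeToAllocation gave each in-loop pack destination its own space-1 buffer (%alloc_6, %alloc_11), allocated and deallocated inside every scf.for iteration, so only one K step's staging data is live at a time:

from math import prod

lhs = prod((1, 2, 1, 32, 64)) * 2  # alloc_6,  bf16: 8192 bytes
rhs = prod((1, 1, 2, 64, 32)) * 2  # alloc_11, bf16: 8192 bytes
print(lhs + rhs)                   # 16384 bytes live per K iteration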
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = tensor.empty() : tensor<1x2x4x32x64xbf16> | |
%8 = tensor.empty() : tensor<1x4x2x64x32xbf16> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%9 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%10 = tensor.empty() : tensor<1x2x4x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x4x2x8x8x8x4xbf16> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%13 = linalg.fill ins(%cst : f32) outs(%12 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%14 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%15 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %15] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_5 = tensor.extract_slice %7[0, 0, %arg4, 0, 0] [1, 2, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x4x32x64xbf16> to tensor<1x2x1x32x64xbf16> | |
%alloc_6 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%16 = bufferization.to_tensor %alloc_6 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %16 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_7 = tensor.extract_slice %10[0, 0, %arg4, 0, 0, 0, 0] [1, 2, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x4x8x8x4x8xbf16> to tensor<1x2x1x8x8x4x8xbf16> | |
%pack_8 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice_0[0, %15, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %8[0, %arg4, 0, 0, 0] [1, 1, 2, 64, 32] [1, 1, 1, 1, 1] : tensor<1x4x2x64x32xbf16> to tensor<1x1x2x64x32xbf16> | |
%alloc_11 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %17 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%extracted_slice_13 = tensor.extract_slice %11[0, %arg4, 0, 0, 0, 0, 0] [1, 1, 2, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x4x2x8x8x8x4xbf16> to tensor<1x1x2x8x8x8x4xbf16> | |
%pack_14 = tensor.pack %pack_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_8, %pack_14 : tensor<1x2x1x8x8x4x8xbf16>, tensor<1x1x2x8x8x8x4xbf16>) outs(%arg5 : tensor<1x2x2x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_15: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_15 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x2x2x8x8x4x4xf32> | |
%19 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_15 = tensor.extract_slice %pack_8[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_16 = tensor.extract_slice %pack_14[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_17 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_15, %extracted_slice_16 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_17 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_18: bf16, %out: f32): | |
%21 = arith.extf %in : bf16 to f32 | |
%22 = arith.extf %in_18 : bf16 to f32 | |
%23 = arith.mulf %21, %22 : f32 | |
%24 = arith.addf %out, %23 : f32 | |
linalg.yield %24 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %20 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_6 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_11 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %19 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %9 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
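// NOTE: AMDAIECleanup dropped the dead linalg.generic and the whole-K destinations; the packs now | |
// target tensor.empty ops sized for one K step (%10, %11). This dump and the next two are | |
// textually identical, since the canonicalize and CSE runs in between find nothing to change. | |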
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_8 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack_6[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_12 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_13 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_13 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_14: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_14 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_8 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before CSE (cse) //----- // | |
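// NOTE: unchanged from the previous dump; the canonicalizer was a no-op here. | |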
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_8 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack_6[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_12 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_13 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_13 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_14: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_14 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_8 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIEFusePackIntoLoop (iree-amdaie-fuse-pack-into-loop) //----- // | |
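// NOTE: unchanged again (CSE was a no-op); the pass below clones the inner-tile packs into the | |
// thread-level scf.forall, as the next dump shows. | |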
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_8 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack_6[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_12 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_13 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_13 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_14: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_14 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_8 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
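// NOTE: AMDAIEFusePackIntoLoop recreated the [4, 8] / [8, 4] inner-tile packs inside the thread | |
// scf.forall (%pack_13, %pack_17) on per-thread slices, leaving the loop-level %pack_6 / %pack_10 | |
// dead; the canonicalizer removes them in the next dump. | |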
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %10 : tensor<1x2x1x32x64xbf16> -> tensor<1x2x1x8x8x4x8xbf16> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_8 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %11 : tensor<1x1x2x64x32xbf16> -> tensor<1x1x2x8x8x8x4xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%extracted_slice_12 = tensor.extract_slice %10[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%pack_13 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_12 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_14 = tensor.extract_slice %pack_6[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_15 = tensor.extract_slice %pack_9[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%extracted_slice_16 = tensor.extract_slice %11[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%pack_17 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_16 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_18 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_19 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_17 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_19 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_20: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_20 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_8 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before CSE (cse) //----- // | |
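// NOTE: the dead loop-level packs are gone; each thread now packs its own 1x1x1 slice of the | |
// memory-space-1 (L2) buffers (%pack_11, %pack_14) into slices of the shared destinations %10 / %11. | |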
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_7 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_9 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %10[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%pack_11 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_10 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_12 = tensor.extract_slice %pack_8[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%extracted_slice_13 = tensor.extract_slice %11[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%pack_14 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_15 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_11, %pack_14 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_15 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_16: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_16 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_7 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- // | |
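// NOTE: identical to the previous dump (CSE found nothing); the pass below starts materializing | |
// explicit memref.allocs for the per-thread packed operands. | |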
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_7 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_9 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %10[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%pack_11 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %extracted_slice_10 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_12 = tensor.extract_slice %pack_8[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%extracted_slice_13 = tensor.extract_slice %11[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%pack_14 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_15 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_11, %pack_14 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_15 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_16: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_16 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_7 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before HoistStaticallyBoundAllocationsPass (iree-codegen-hoist-statically-bound-allocations) //----- // | |
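// NOTE: bufferize-to-allocation now gives the per-thread pack destinations explicit | |
// memory-space-2 (core-local) buffers, e.g. %alloc_11 below; the file is truncated partway | |
// through this dump. | |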
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_0 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_1 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%alloc = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%7 = bufferization.to_tensor %alloc restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%alloc_5 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%alloc_7 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_9 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %10[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%alloc_11 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_13 = tensor.extract_slice %pack_8[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%extracted_slice_14 = tensor.extract_slice %11[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%alloc_15 = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%18 = bufferization.to_tensor %alloc_15 restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_16 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_17 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_16 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_17 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_18: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_18 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
memref.dealloc %alloc_11 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc_15 : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
memref.dealloc %alloc_5 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_7 : memref<1x1x2x64x32xbf16, 1 : i32> | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
memref.dealloc %alloc : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_3 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
return | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
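// NOTE: the dump below is the output of the hoisting pass: the six buffers now live at
// function scope (%alloc through %alloc_4), with matching memref.dealloc ops at the end.
// As a side effect, the tensor.empty values %10 and %11 and the tensor.extract_slice ops
// reading them are left without uses (the packs write into the hoisted buffers through
// bufferization.to_tensor instead), so this canonicalization erases them as dead code;
// they are gone in the "Before CSE" dump that follows. A trivial sketch of that
// dead-code folding, with hypothetical types:
//
//   %e = tensor.empty() : tensor<4xf32>                                    // result never read
//   %s = tensor.extract_slice %e[0] [2] [1] : tensor<4xf32> to tensor<2xf32> // also never read
//   // both ops are erased because nothing consumes %e or %s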
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%c1 = arith.constant 1 : index | |
%c4 = arith.constant 4 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c0 = arith.constant 0 : index | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = tensor.empty() : tensor<1x2x1x8x8x4x8xbf16> | |
%11 = tensor.empty() : tensor<1x1x2x8x8x8x4xbf16> | |
%12 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%extracted_slice_12 = tensor.extract_slice %10[0, %arg6, 0, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 8] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x1x8x8x4x8xbf16> to tensor<1x1x1x8x8x4x8xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_13 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_14 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%extracted_slice_15 = tensor.extract_slice %11[0, 0, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 8, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x2x8x8x8x4xbf16> to tensor<1x1x1x8x8x8x4xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_16 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_17 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_16 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_17 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_18: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_18 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_7 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
memref.dealloc %alloc_4 : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_3 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_1 : memref<1x1x2x64x32xbf16, 1 : i32> | |
memref.dealloc %alloc_0 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
return | |
} | |
// -----// IR Dump Before CSE (cse) //----- // | |
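// NOTE: CSE merges structurally identical side-effect-free ops. After the canonicalization
// above there are no remaining duplicates to merge, so the next dump
// ("Before AMDAIEPeelForLoop") is essentially identical to this one.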
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%11 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, 0, %11] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %12 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[0, %11, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%13 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %13 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%14 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%15 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_12 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %15 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_13 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%16 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_14 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %16 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_15 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_14 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_15 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_16: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_16 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %14 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %10 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_7 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
memref.dealloc %alloc_4 : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_3 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_1 : memref<1x1x2x64x32xbf16, 1 : i32> | |
memref.dealloc %alloc_0 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
return | |
} | |
// -----// IR Dump Before AMDAIEPeelForLoop (iree-amdaie-peel-for-loop) //----- // | |
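// NOTE: AMDAIEPeelForLoop splits the K-loop below (scf.for %arg4 = %c0 to %c4 step %c1)
// into a peeled first iteration, a steady-state loop, and a peeled last iteration, so that
// the zero-fill of the accumulator can later be attached to the first iteration and the
// unpack/store epilogue to the last. The next dump shows the resulting structure; abridged
// (operand lists elided, duplicated constants renamed):
//
//   %10 = scf.for %arg4 = %c0 to %c1 step %c1 iter_args(%arg5 = %9)  ... // first iteration, [0, 1)
//   %11 = scf.for %arg4 = %c1 to %c3 step %c1 iter_args(%arg5 = %10) ... // steady state,    [1, 3)
//   %12 = scf.for %arg4 = %c3 to %c4 step %c1 iter_args(%arg5 = %11) ... // last iteration,  [3, 4)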
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%10 = scf.for %arg4 = %c0 to %c4 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%11 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, 0, %11] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %12 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[0, %11, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%13 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %13 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%14 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_11 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%15 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_12 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %15 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_13 = tensor.extract_slice %pack_10[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%16 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_14 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %16 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_15 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_14 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_15 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_16: bf16, %out: f32): | |
%18 = arith.extf %in : bf16 to f32 | |
%19 = arith.extf %in_16 : bf16 to f32 | |
%20 = arith.mulf %18, %19 : f32 | |
%21 = arith.addf %out, %20 : f32 | |
linalg.yield %21 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %17 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %14 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %10 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_7 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
memref.dealloc %alloc_4 : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_3 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_1 : memref<1x1x2x64x32xbf16, 1 : i32> | |
memref.dealloc %alloc_0 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
return | |
} | |
// -----// IR Dump Before Canonicalizer (canonicalize) //----- // | |
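// NOTE: the dump below is the peeled form. Because the first and last peeled loops each run
// exactly one iteration, this canonicalization replaces them with straight-line code: the
// [0, 1) loop becomes the k = 0 body with the affine offset folded to 0, and the [3, 4) loop
// becomes the k = 3 body with affine_map<(d0) -> (d0 * 64)> folded to 3 * 64 = 192. Only the
// steady-state loop, scf.for %arg4 = %c1 to %c3, survives, as seen in the
// "Before AMDAIEFuseFillIntoForall" dump that follows.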
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c4 = arith.constant 4 : index | |
%c1 = arith.constant 1 : index | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%c1_7 = arith.constant 1 : index | |
%10 = scf.for %arg4 = %c0 to %c1_7 step %c1 iter_args(%arg5 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_12 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_14 = tensor.extract_slice %pack_11[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_16 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_16 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_17: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_17 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%c3 = arith.constant 3 : index | |
%11 = scf.for %arg4 = %c1_7 to %c3 step %c1 iter_args(%arg5 = %10) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_12 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_14 = tensor.extract_slice %pack_11[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_16 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_16 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_17: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_17 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%12 = scf.for %arg4 = %c3 to %c4 step %c1 iter_args(%arg5 = %11) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%13 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, 0, %13] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[0, %13, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_12 = tensor.extract_slice %pack[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_14 = tensor.extract_slice %pack_11[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_16 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_16 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_17: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_17 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %16 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_8 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_8 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
memref.dealloc %alloc_4 : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_3 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_1 : memref<1x1x2x64x32xbf16, 1 : i32> | |
memref.dealloc %alloc_0 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
return | |
} | |
// -----// IR Dump Before AMDAIEFuseFillIntoForall (iree-amdaie-fuse-fill-into-forall) //----- // | |
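// NOTE: at this point the accumulator fill (%9 = linalg.fill ... outs(%8)) still runs in full
// before the first thread-level scf.forall consumes it as shared_outs(%arg6 = %9).
// AMDAIEFuseFillIntoForall is expected to sink the fill into that forall so each (y, x) tile
// initializes only its own tensor<1x1x1x8x8x4x4xf32> slice. A minimal sketch of the intended
// rewrite (abridged operand lists; not copied from a later dump, since this file is truncated
// before the pass output appears):
//
//   // before: whole accumulator filled up front
//   %9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32>
//   scf.forall (%ty, %tx) in (2, 2) shared_outs(%acc = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { ... }
//
//   // after (sketch): each tile fills only its own slice inside the forall
//   scf.forall (%ty, %tx) in (2, 2) shared_outs(%acc = %8) -> (tensor<1x2x2x8x8x4x4xf32>) {
//     %slice = tensor.extract_slice %acc[0, %ty, %tx, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1]
//         : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32>
//     %init = linalg.fill ins(%cst : f32) outs(%slice : tensor<1x1x1x8x8x4x4xf32>) -> tensor<1x1x1x8x8x4x4xf32>
//     ...
//   }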
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c3 = arith.constant 3 : index | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c1 = arith.constant 1 : index | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0, 0] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %10 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %11 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%12 = scf.forall (%arg4, %arg5) in (2, 2) shared_outs(%arg6 = %9) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_15 = tensor.extract_slice %pack[0, %arg4, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_17 = tensor.extract_slice %pack_9[0, 0, %arg5, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_19 = tensor.extract_slice %arg6[0, %arg4, %arg5, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_19 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_20: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_20 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg6[0, %arg4, %arg5, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
%13 = scf.for %arg4 = %c1 to %c3 step %c1 iter_args(%arg5 = %12) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%17 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg4) | |
%extracted_slice_15 = tensor.extract_slice %extracted_slice[0, 0, %17] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack_16 = tensor.pack %extracted_slice_15 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %18 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_17 = tensor.extract_slice %extracted_slice_5[0, %17, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %19 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%20 = scf.forall (%arg6, %arg7) in (2, 2) shared_outs(%arg8 = %arg5) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_19 = tensor.extract_slice %pack_16[0, %arg6, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_20 = tensor.pack %extracted_slice_19 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %21 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_21 = tensor.extract_slice %pack_18[0, 0, %arg7, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_22 = tensor.pack %extracted_slice_21 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %22 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_23 = tensor.extract_slice %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_20, %pack_22 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_23 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_24: bf16, %out: f32): | |
%24 = arith.extf %in : bf16 to f32 | |
%25 = arith.extf %in_24 : bf16 to f32 | |
%26 = arith.mulf %24, %25 : f32 | |
%27 = arith.addf %out, %26 : f32 | |
linalg.yield %27 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %23 into %arg8[0, %arg6, %arg7, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
scf.yield %20 : tensor<1x2x2x8x8x4x4xf32> | |
} | |
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 0, 192] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %14 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[0, 192, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x1x2x64x32xbf16, 1 : i32> | |
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [64, 32] into %15 : tensor<1x64x64xbf16> -> tensor<1x1x2x64x32xbf16> | |
%16 = scf.forall (%arg4, %arg5) in (2, 2) shared_outs(%arg6 = %13) -> (tensor<1x2x2x8x8x4x4xf32>) { | |
%extracted_slice_15 = tensor.extract_slice %pack_11[0, %arg4, 0, 0, 0] [1, 1, 1, 32, 64] [1, 1, 1, 1, 1] : tensor<1x2x1x32x64xbf16> to tensor<1x1x1x32x64xbf16> | |
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 8] into %17 : tensor<1x1x1x32x64xbf16> -> tensor<1x1x1x8x8x4x8xbf16> | |
%extracted_slice_17 = tensor.extract_slice %pack_13[0, 0, %arg5, 0, 0] [1, 1, 1, 64, 32] [1, 1, 1, 1, 1] : tensor<1x1x2x64x32xbf16> to tensor<1x1x1x64x32xbf16> | |
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [8, 4] into %18 : tensor<1x1x1x64x32xbf16> -> tensor<1x1x1x8x8x8x4xbf16> | |
%extracted_slice_19 = tensor.extract_slice %arg6[0, %arg4, %arg5, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x2x2x8x8x4x4xf32> to tensor<1x1x1x8x8x4x4xf32> | |
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d3, d6, d4, d7, d9)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d3, d2, d5, d6, d9, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d0, d1, d2, d5, d4, d7, d8)>], iterator_types = ["parallel", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x1x8x8x4x8xbf16>, tensor<1x1x1x8x8x8x4xbf16>) outs(%extracted_slice_19 : tensor<1x1x1x8x8x4x4xf32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[1, 64, 64], [0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [0, 32, 32, 64], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1, 2]]}, {packedSizes = [0, 0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 2, 4, 3], [0, 1, 2, 4, 3], [0, 1, 2, 4, 3]]}]>} { | |
^bb0(%in: bf16, %in_20: bf16, %out: f32): | |
%20 = arith.extf %in : bf16 to f32 | |
%21 = arith.extf %in_20 : bf16 to f32 | |
%22 = arith.mulf %20, %21 : f32 | |
%23 = arith.addf %out, %22 : f32 | |
linalg.yield %23 : f32 | |
} -> tensor<1x1x1x8x8x4x4xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %19 into %arg6[0, %arg4, %arg5, 0, 0, 0, 0] [1, 1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1, 1] : tensor<1x1x1x8x8x4x4xf32> into tensor<1x2x2x8x8x4x4xf32> | |
} | |
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]} | |
%unpack = tensor.unpack %16 outer_dims_perm = [0, 1, 2, 4, 3] inner_dims_pos = [3, 4] inner_tiles = [4, 4] into %7 : tensor<1x2x2x8x8x4x4xf32> -> tensor<1x2x2x32x32xf32> | |
%unpack_14 = tensor.unpack %unpack inner_dims_pos = [1, 2] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<1x2x2x32x32xf32> -> tensor<1x64x64xf32> | |
scf.forall.in_parallel { | |
tensor.parallel_insert_slice %unpack_14 into %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x64x64xf32> into tensor<1x128x128xf32> | |
} | |
} {mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>]} | |
flow.dispatch.tensor.store %6, %2, offsets = [0, 0, 0], sizes = [1, 128, 128], strides = [1, 1, 1] : tensor<1x128x128xf32> -> !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
memref.dealloc %alloc_4 : memref<1x2x2x32x32xf32, 1 : i32> | |
memref.dealloc %alloc_3 : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
memref.dealloc %alloc_2 : memref<1x2x1x32x64xbf16, 1 : i32> | |
memref.dealloc %alloc_1 : memref<1x1x2x64x32xbf16, 1 : i32> | |
memref.dealloc %alloc_0 : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
memref.dealloc %alloc : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
return | |
} | |
// -----// IR Dump Before AMDAIEFuseConsumerIntoLoop (iree-amdaie-fuse-consumer-into-loop) //----- // | |
func.func @batch_matmul_dispatch_0_batch_matmul_1x128x128x256_bf16xbf16xf32() attributes {translation_info = #iree_codegen.translation_info<Custom>} { | |
%c3 = arith.constant 3 : index | |
%c0 = arith.constant 0 : index | |
%cst = arith.constant 0.000000e+00 : f32 | |
%c1 = arith.constant 1 : index | |
%alloc = memref.alloc() : memref<1x1x1x8x8x8x4xbf16, 2 : i32> | |
%alloc_0 = memref.alloc() : memref<1x1x1x8x8x4x8xbf16, 2 : i32> | |
%alloc_1 = memref.alloc() : memref<1x1x2x64x32xbf16, 1 : i32> | |
%alloc_2 = memref.alloc() : memref<1x2x1x32x64xbf16, 1 : i32> | |
%alloc_3 = memref.alloc() : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%alloc_4 = memref.alloc() : memref<1x2x2x32x32xf32, 1 : i32> | |
%0 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(0) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> | |
%1 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(1) alignment(64) offset(%c0) flags("ReadOnly|Indirect") : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> | |
%2 = hal.interface.binding.subspan layout(<bindings = [#hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, "ReadOnly|Indirect">, #hal.pipeline.binding<storage_buffer, Indirect>], flags = Indirect>) binding(2) alignment(64) offset(%c0) flags(Indirect) : !flow.dispatch.tensor<writeonly:tensor<1x128x128xf32>> | |
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0], sizes = [1, 128, 256], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x128x256xbf16>> -> tensor<1x128x256xbf16> | |
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0], sizes = [1, 256, 128], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<1x256x128xbf16>> -> tensor<1x256x128xbf16> | |
%5 = tensor.empty() : tensor<1x128x128xf32> | |
%6 = scf.forall (%arg0, %arg1, %arg2) = (0, 0, 0) to (1, 128, 128) step (1, 64, 64) shared_outs(%arg3 = %5) -> (tensor<1x128x128xf32>) { | |
%extracted_slice = tensor.extract_slice %3[%arg0, %arg1, 0] [1, 64, 256] [1, 1, 1] : tensor<1x128x256xbf16> to tensor<1x64x256xbf16> | |
%extracted_slice_5 = tensor.extract_slice %4[%arg0, 0, %arg2] [1, 256, 64] [1, 1, 1] : tensor<1x256x128xbf16> to tensor<1x256x64xbf16> | |
%extracted_slice_6 = tensor.extract_slice %arg3[%arg0, %arg1, %arg2] [1, 64, 64] [1, 1, 1] : tensor<1x128x128xf32> to tensor<1x64x64xf32> | |
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<1x2x2x32x32xf32, 1 : i32> | |
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<1x2x2x8x8x4x4xf32, 2 : i32> | |
%9 = linalg.fill ins(%cst : f32) outs(%8 : tensor<1x2x2x8x8x4x4xf32>) -> tensor<1x2x2x8x8x4x4xf32> | |
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0, 0] [1, 64, 64] [1, 1, 1] : tensor<1x64x256xbf16> to tensor<1x64x64xbf16> | |
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<1x2x1x32x64xbf16, 1 : i32> | |
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [1, 2] inner_tiles = [32, 64] into %10 : tensor<1x64x64xbf16> -> tensor<1x2x1x32x64xbf16> | |
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0, 0] [1, 64, 64] [1, 1, 1] : tensor<1x256x64xbf16> to tensor<1x64x64xbf16> | |