@Abhishek-Varma
Created June 19, 2024 16:32

Current IR state of matmul + vectorization + objectfifo

(This file has been truncated.)
// -----// IR Dump Before TranslateTargetExecutableVariantsPass (iree-hal-translate-target-executable-variants) //----- //
hal.executable.variant public @amdaie_xclbin_fb target(<"amd-aie", "amdaie-xclbin-fb", {target_arch = "chip-tbd", ukernels = "none"}>) {
hal.executable.export public @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) attributes {hal.interface.bindings = [#hal.interface.binding<0, 0>, #hal.interface.binding<0, 1>, #hal.interface.binding<0, 2>]} {
^bb0(%arg0: !hal.device):
%x, %y, %z = flow.dispatch.workgroup_count_from_slice
hal.return %x, %y, %z : index, index, index
}
builtin.module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
}
}
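
The dispatch above is the starting point: a plain 128x128x256 matmul on i32 operands (despite the i8 in the dispatch name, every tensor at this level is already i32), with a linalg.fill zero-initializing the accumulator. A minimal NumPy sketch of the computation this dispatch performs; the array names are illustrative:

# Reference for the dispatch above: C = A @ B with i32 accumulation,
# A: 128x256, B: 256x128, C zero-filled first (linalg.fill).
import numpy as np

A = np.random.randint(-128, 128, size=(128, 256)).astype(np.int32)
B = np.random.randint(-128, 128, size=(256, 128)).astype(np.int32)
C = np.zeros((128, 128), dtype=np.int32)  # linalg.fill ins(%c0_i32)
C += A @ B                                # linalg.matmul
assert C.shape == (128, 128)
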
// -----// IR Dump Before TypePropagation (iree-codegen-type-propagation) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before BubbleUpOrdinalOps (iree-codegen-bubble-up-ordinal-ops) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before BufferizeCopyOnlyDispatches (iree-codegen-bufferize-copy-only-dispatches) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before DecomposeSoftmax (iree-codegen-decompose-softmax) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before MaterializeUserConfigs (iree-codegen-materialize-user-configs) //----- //
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
}
// -----// IR Dump Before AMDAIELoweringStrategy (iree-amdaie-lowering-strategy) //----- //
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
}
// -----// IR Dump Before LowerExecutableUsingTransformDialect (iree-codegen-lower-executable-using-transform-dialect) //----- //
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
}
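
From this dump onward the matmul carries the strategy attached by AMDAIELoweringStrategy: a three-level tile_sizes list and a two-stage packing_config. Below is that configuration as plain data, with a hedged reading of what each level appears to control (the later dumps confirm the first level; the other comments are interpretation):

# The attached attributes, restated as Python data. Per-level comments are
# an interpretation based on the transformations visible in later dumps.
lowering_config_tile_sizes = [
    [64, 64],            # level 0: 64x64 output blocks (the scf.forall below)
    [0, 0, 1],           # level 1: reduction dim tiled by 1 (per packed K tile)
    [1, 1, 0, 0, 0, 0],  # level 2: 1x1 tiles over the packed outer dims
]
packing_config = [
    {"packedSizes": [32, 32, 32]},        # stage 1: 32x32 tiles
    {"packedSizes": [0, 0, 0, 4, 4, 8]},  # stage 2: 4x4x8 microtiles
]
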
// -----// IR Dump Before AMDAIELowerExecutableTarget (iree-amdaie-lower-executable-target) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
flow.dispatch.tensor.store %7, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = linalg.fill ins(%c0_i32 : i32) outs(%5 : tensor<128x128xi32>) -> tensor<128x128xi32>
%7 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%3, %4 : tensor<128x256xi32>, tensor<256x128xi32>) outs(%6 : tensor<128x128xi32>) -> tensor<128x128xi32>
%8 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%extracted_slice_2 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%10 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<64x256xi32>, tensor<256x64xi32>) outs(%9 : tensor<64x64xi32>) -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %10 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %8, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
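
AMDAIETileAndFuse has now materialized the first tile_sizes level: an scf.forall over a 2x2 grid of 64x64 output blocks, mapped to #gpu.block<y>/<x>, with the fill and matmul fused into each block (the pre-tiling ops are still present here and are swept up by AMDAIECleanup next). A sequential NumPy sketch of the loop nest the scf.forall expresses:

# Sequential model of the scf.forall above; in the IR the two loops form a
# parallel 2x2 grid mapped to #gpu.block<y> and #gpu.block<x>.
import numpy as np

def tiled_matmul(A, B):  # A: 128x256 i32, B: 256x128 i32
    C = np.empty((128, 128), dtype=np.int32)
    for i in range(0, 128, 64):          # %arg0
        for j in range(0, 128, 64):      # %arg1
            a = A[i:i+64, :]             # tensor.extract_slice: 64x256
            b = B[:, j:j+64]             # tensor.extract_slice: 256x64
            acc = np.zeros((64, 64), dtype=np.int32)  # linalg.fill
            acc += a @ b                 # the tiled linalg.matmul
            C[i:i+64, j:j+64] = acc      # tensor.parallel_insert_slice
    return C
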
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<64x256xi32>, tensor<256x64xi32>) outs(%7 : tensor<64x64xi32>) -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<64x256xi32>, tensor<256x64xi32>) outs(%7 : tensor<64x64xi32>) -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%8 = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} ins(%extracted_slice, %extracted_slice_0 : tensor<64x256xi32>, tensor<256x64xi32>) outs(%7 : tensor<64x64xi32>) -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %8 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEPropagateDataLayout (iree-amdaie-propagate-data-layout) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%8 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%9 = tensor.empty() : tensor<8x2x32x32xi32>
%10 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%11 = tensor.empty() : tensor<2x2x32x32xi32>
%pack_3 = tensor.pack %7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<64x64xi32> -> tensor<2x2x32x32xi32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%pack_3 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_4: i32, %out: i32):
%13 = arith.muli %in, %in_4 : i32
%14 = arith.addi %out, %13 : i32
linalg.yield %14 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
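
AMDAIEPackAndTranspose has applied packing stage 1 (packedSizes = [32, 32, 32]): the 64x256, 256x64, and 64x64 slices are rearranged into 32x32 tiles (tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>, tensor<2x2x32x32xi32>), and the matmul becomes a linalg.generic whose outer dims iterate over tiles while the inner dims perform each 32x32 block product. A NumPy sketch of what tensor.pack with inner_dims_pos = [0, 1] and inner_tiles = [32, 32] does to the A-side slice:

# tensor.pack into 32x32 tiles: packed[i, j, ii, jj] = src[i*32+ii, j*32+jj].
import numpy as np

def pack_32x32(t):
    r, c = t.shape                       # e.g. 64x256
    return (t.reshape(r // 32, 32, c // 32, 32)
             .transpose(0, 2, 1, 3))     # -> (r/32, c/32, 32, 32)

a = np.arange(64 * 256, dtype=np.int32).reshape(64, 256)
assert pack_32x32(a).shape == (2, 8, 32, 32)   # matches %pack above
assert pack_32x32(a)[1, 3, 5, 7] == a[1*32 + 5, 3*32 + 7]
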
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_1 : tensor<64x64xi32>) -> tensor<64x64xi32>
%8 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%9 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%10 = tensor.empty() : tensor<2x2x32x32xi32>
%pack_3 = tensor.pack %7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x64xi32> -> tensor<2x2x32x32xi32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%pack_3 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_4: i32, %out: i32):
%12 = arith.muli %in, %in_4 : i32
%13 = arith.addi %out, %12 : i32
linalg.yield %13 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%9 = tensor.empty() : tensor<2x2x32x32xi32>
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%10 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_3: i32, %out: i32):
%12 = arith.muli %in, %in_3 : i32
%13 = arith.addi %out, %12 : i32
linalg.yield %13 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%9 = tensor.empty() : tensor<2x2x32x32xi32>
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%11 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%10 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_3: i32, %out: i32):
%12 = arith.muli %in, %in_3 : i32
%13 = arith.addi %out, %12 : i32
linalg.yield %13 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%9 = tensor.empty() : tensor<2x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%10 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%11 = linalg.fill ins(%c0_i32 : i32) outs(%10 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%11 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_3: i32, %out: i32):
%13 = arith.muli %in, %in_3 : i32
%14 = arith.addi %out, %13 : i32
linalg.yield %14 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
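
AMDAIEBufferizeToAllocation has given the 2x2x32x32 accumulator an explicit buffer: a memref.alloc in memory space 1 (presumably the shared memtile/L2 level of the AIE memory hierarchy; the space assignment is an interpretation), wrapped back into tensor form with bufferization.to_tensor restrict writable and freed at the end of each block iteration. Its footprint:

# Size of memref<2x2x32x32xi32, 1 : i32> allocated above.
elems = 2 * 2 * 32 * 32           # 4096 i32 elements
print(elems * 4, "bytes")         # 16384 bytes = 16 KiB
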
// -----// IR Dump Before AMDAIEPackAndTranspose (iree-amdaie-pack-and-transpose) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%9 = tensor.empty() : tensor<2x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%10 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%11 = linalg.fill ins(%c0_i32 : i32) outs(%10 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%12 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d2, d3, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d1, d5, d4)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d3, d4)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack, %pack_2 : tensor<2x8x32x32xi32>, tensor<8x2x32x32xi32>) outs(%11 : tensor<2x2x32x32xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_3: i32, %out: i32):
%13 = arith.muli %in, %in_3 : i32
%14 = arith.addi %out, %13 : i32
linalg.yield %14 : i32
} -> tensor<2x2x32x32xi32>
%unpack = tensor.unpack %12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEPropagateDataLayout (iree-amdaie-propagate-data-layout) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%9 = tensor.empty() : tensor<2x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%10 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%11 = linalg.fill ins(%c0_i32 : i32) outs(%10 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%12 = tensor.empty() : tensor<2x8x8x4x4x8xi32>
%13 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %13 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%14 = tensor.empty() : tensor<8x2x4x8x4x8xi32>
%15 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %15 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%16 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%17 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%pack_5 = tensor.pack %11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %17 : tensor<2x2x32x32xi32> -> tensor<2x2x8x8x4x4xi32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%pack_5 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%19 = arith.muli %in, %in_7 : i32
%20 = arith.addi %out, %19 : i32
linalg.yield %20 : i32
} -> tensor<2x2x8x8x4x4xi32>
%unpack = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %11 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
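// In the dump below, the accumulator is zero-filled in its tensor<2x2x32x32xi32>
// layout and then packed (%pack_5). Canonicalization folds the fill through the pack:
// in the post-canonicalization state (the "Before CSE" dump further down), linalg.fill
// writes the packed tensor<2x2x8x8x4x4xi32> layout directly and %pack_5 is gone.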
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = linalg.fill ins(%c0_i32 : i32) outs(%9 : tensor<2x2x32x32xi32>) -> tensor<2x2x32x32xi32>
%11 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %11 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%12 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %12 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%13 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%pack_5 = tensor.pack %10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %13 : tensor<2x2x32x32xi32> -> tensor<2x2x8x8x4x4xi32>
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%pack_5 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%15 = arith.muli %in, %in_7 : i32
%16 = arith.addi %out, %15 : i32
linalg.yield %16 : i32
} -> tensor<2x2x8x8x4x4xi32>
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %10 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before CSE (cse) //----- //
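// CSE is a no-op here: the "Before AMDAIEBufferizeToAllocation" dump that follows is
// identical to this one, so there were no redundant subexpressions to merge.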
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%12 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%13 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_6: i32, %out: i32):
%15 = arith.muli %in, %in_6 : i32
%16 = arith.addi %out, %15 : i32
linalg.yield %16 : i32
} -> tensor<2x2x8x8x4x4xi32>
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_5 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_5 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
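// AMDAIEBufferizeToAllocation materializes the fill destination as an explicit
// allocation: the next dump gains
//   %alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
// wrapped in bufferization.to_tensor and paired with a memref.dealloc, mirroring the
// existing memory-space-1 allocation of the tensor<2x2x32x32xi32> buffer (memory
// space 2 being the core-local level in this backend's convention).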
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%12 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%13 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_6: i32, %out: i32):
%15 = arith.muli %in, %in_6 : i32
%16 = arith.addi %out, %15 : i32
linalg.yield %16 : i32
} -> tensor<2x2x8x8x4x4xi32>
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_5 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_5 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
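// AMDAIETileAndFuse tiles the outermost reduction dimension (d2, extent 8; tile size 1
// per the [0, 0, 1] entry of the lowering_config) into an scf.for that carries the
// packed accumulator. Schematically, as the next dump shows:
//
//   %14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
//     // extract the %arg3-th K-slice of each packed operand,
//     // run one linalg.generic accumulation step, yield the partial sum
//   }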
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%12 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%14 = linalg.fill ins(%c0_i32 : i32) outs(%13 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%14 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%16 = arith.muli %in, %in_7 : i32
%17 = arith.addi %out, %16 : i32
linalg.yield %17 : i32
} -> tensor<2x2x8x8x4x4xi32>
%unpack = tensor.unpack %15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
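// AMDAIECleanup folds away the redundant slicing introduced by tiling: the full-size
// tensor.extract_slice of %arg4 (%extracted_slice_10) and the matching full-size
// tensor.insert_slice disappear, so in the next dump the linalg.generic accumulates
// into %arg4 directly and the loop yields its result; the dead tensor.empty (%12) is
// dropped and the loop-bound constants are hoisted to the function entry.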
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%12 = tensor.empty() : tensor<2x2x8x8x4x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%14 = linalg.fill ins(%c0_i32 : i32) outs(%13 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_3, %pack_4 : tensor<2x8x4x8x4x8xi32>, tensor<8x2x8x4x8x4xi32>) outs(%14 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_8: i32, %out: i32):
%17 = arith.muli %in, %in_8 : i32
%18 = arith.addi %out, %17 : i32
linalg.yield %18 : i32
} -> tensor<2x2x8x8x4x4xi32>
%c0_6 = arith.constant 0 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%16 = scf.for %arg3 = %c0_6 to %c8 step %c1 iter_args(%arg4 = %14) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_8 = tensor.extract_slice %pack_3[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%extracted_slice_9 = tensor.extract_slice %pack_4[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%extracted_slice_10 = tensor.extract_slice %arg4[0, 0, 0, 0, 0, 0] [2, 2, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<2x2x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_8, %extracted_slice_9 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%extracted_slice_10 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_11: i32, %out: i32):
%18 = arith.muli %in, %in_11 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<2x2x8x8x4x4xi32>
%inserted_slice = tensor.insert_slice %17 into %arg4[0, 0, 0, 0, 0, 0] [2, 2, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_7 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
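// The canonicalizer finds nothing further to simplify; the "Before CSE" dump that
// follows is identical to this one.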
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_7 = tensor.extract_slice %pack_3[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%extracted_slice_8 = tensor.extract_slice %pack_4[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_9: i32, %out: i32):
%16 = arith.muli %in, %in_9 : i32
%17 = arith.addi %out, %16 : i32
linalg.yield %17 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %15 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before CSE (cse) //----- //
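// Likewise a no-op: the "Before AMDAIEFusePackIntoLoop" dump below matches this one.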
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_7 = tensor.extract_slice %pack_3[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%extracted_slice_8 = tensor.extract_slice %pack_4[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_9: i32, %out: i32):
%16 = arith.muli %in, %in_9 : i32
%17 = arith.addi %out, %16 : i32
linalg.yield %17 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %15 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEFusePackIntoLoop (iree-amdaie-fuse-pack-into-loop) //----- //
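// AMDAIEFusePackIntoLoop sinks the producer tensor.pack ops into the reduction loop so
// each iteration repacks only the K-slice it consumes: in the next dump, iteration
// %arg3 extracts a 64x32 LHS slice and a 32x64 RHS slice at offset %arg3 * 32, packs
// them into 32x32 tiles, then packs those into the 4x8 / 8x4 inner layouts, instead of
// packing the full 64x256 and 256x64 operands ahead of the loop.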
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_7 = tensor.extract_slice %pack_3[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%extracted_slice_8 = tensor.extract_slice %pack_4[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%15 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_7, %extracted_slice_8 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_9: i32, %out: i32):
%16 = arith.muli %in, %in_9 : i32
%17 = arith.addi %out, %16 : i32
linalg.yield %17 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %15 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
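// After the fusion above, the pre-loop whole-operand packs (%pack, %pack_2, %pack_3,
// %pack_4) and the slices taken from them inside the loop are dead; canonicalization
// deletes them, leaving only the per-iteration packs (next dump). The duplicated
// affine.apply affine_map<(d0) -> (d0 * 32)> is left for the CSE run that follows.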
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%pack = tensor.pack %extracted_slice inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %7 : tensor<64x256xi32> -> tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%pack_2 = tensor.pack %extracted_slice_0 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %8 : tensor<256x64xi32> -> tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%pack_3 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x8x32x32xi32> -> tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%pack_4 = tensor.pack %pack_2 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<8x2x32x32xi32> -> tensor<8x2x8x4x8x4xi32>
%alloc_5 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, %15] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%extracted_slice_8 = tensor.extract_slice %7[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%pack_9 = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_8 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %pack[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%extracted_slice_11 = tensor.extract_slice %10[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%pack_12 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_11 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_13 = tensor.extract_slice %pack_3[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice_0[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%extracted_slice_15 = tensor.extract_slice %8[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%pack_16 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%extracted_slice_17 = tensor.extract_slice %pack_2[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%extracted_slice_18 = tensor.extract_slice %11[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%pack_19 = tensor.pack %pack_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_18 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%extracted_slice_20 = tensor.extract_slice %pack_4[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_19 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_21: i32, %out: i32):
%18 = arith.muli %in, %in_21 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %17 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_6 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_6 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before CSE (cse) //----- //
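// CSE merges the two identical affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
// computations (%15 and %16 in the loop below) into one; the rest of the IR is
// unchanged.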
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %15] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%extracted_slice_5 = tensor.extract_slice %7[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_5 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_6 = tensor.extract_slice %10[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%pack_7 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_6 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_8 = tensor.extract_slice %extracted_slice_0[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%extracted_slice_9 = tensor.extract_slice %8[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_9 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%extracted_slice_11 = tensor.extract_slice %11[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%pack_12 = tensor.pack %pack_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_7, %pack_12 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%18 = arith.muli %in, %in_13 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %17 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
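// A second bufferize-to-allocation run. By analogy with the first (which gave the
// accumulator an explicit memref.alloc in memory space 2), this one should assign
// explicit allocations to further intermediates; the per-iteration pack destinations
// created by the fused loop are the natural candidates.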
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %15] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%extracted_slice_5 = tensor.extract_slice %7[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_5 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_6 = tensor.extract_slice %10[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%pack_7 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_6 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_0[%15, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%extracted_slice_9 = tensor.extract_slice %8[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_9 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%extracted_slice_11 = tensor.extract_slice %11[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%pack_12 = tensor.pack %pack_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%16 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_7, %pack_12 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%17 = arith.muli %in, %in_13 : i32
%18 = arith.addi %out, %17 : i32
linalg.yield %18 : i32
} -> tensor<2x2x8x8x4x4xi32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
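// Each operand is staged through two tensor.pack levels in the dump above: a first pack
// that tiles the flat 64x32 (resp. 32x64) slice into 32x32 blocks, and a second pack that
// rearranges each block into micro-tiles. A minimal standalone sketch of the first level
// (hypothetical function, not part of this dump; same attributes as %pack above):
func.func @pack_l2_sketch(%src: tensor<64x32xi32>) -> tensor<2x1x32x32xi32> {
  %dest = tensor.empty() : tensor<2x1x32x32xi32>
  // Element (i, j) of %src lands at %dest[i floordiv 32, j floordiv 32, i mod 32, j mod 32].
  %0 = tensor.pack %src inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %dest : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
  return %0 : tensor<2x1x32x32xi32>
}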
// -----// IR Dump Before AMDAIETileAndFuse (iree-amdaie-tile-and-fuse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %15] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%extracted_slice_5 = tensor.extract_slice %7[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%alloc_6 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%16 = bufferization.to_tensor %alloc_6 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %16 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_7 = tensor.extract_slice %10[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%pack_8 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_9 = tensor.extract_slice %extracted_slice_0[%15, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%extracted_slice_10 = tensor.extract_slice %8[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%alloc_11 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%extracted_slice_13 = tensor.extract_slice %11[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%pack_14 = tensor.pack %pack_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_8, %pack_14 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_15: i32, %out: i32):
%19 = arith.muli %in, %in_15 : i32
%20 = arith.addi %out, %19 : i32
linalg.yield %20 : i32
} -> tensor<2x2x8x8x4x4xi32>
memref.dealloc %alloc_6 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_11 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %18 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
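// Compared with the previous dump, AMDAIEBufferizeToAllocation has given the two
// per-iteration pack destinations explicit buffers (%alloc_6, %alloc_11) in memory
// space 1, which in this pipeline appears to denote the shared memtile (L2) level,
// with space 2 being core-local (L1). A minimal sketch of the alloc-then-wrap pattern
// (hypothetical function; `restrict writable` promises one-shot bufferization an
// unaliased tensor that may be written in place):
func.func @l2_staging_sketch() -> tensor<2x1x32x32xi32> {
  %alloc = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
  %0 = bufferization.to_tensor %alloc restrict writable : memref<2x1x32x32xi32, 1 : i32>
  return %0 : tensor<2x1x32x32xi32>
}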
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = tensor.empty() : tensor<2x8x32x32xi32>
%8 = tensor.empty() : tensor<8x2x32x32xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%9 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%10 = tensor.empty() : tensor<2x8x4x8x4x8xi32>
%11 = tensor.empty() : tensor<8x2x8x4x8x4xi32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%13 = linalg.fill ins(%c0_i32 : i32) outs(%12 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%15 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %15] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%extracted_slice_5 = tensor.extract_slice %7[0, %arg3, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
%alloc_6 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%16 = bufferization.to_tensor %alloc_6 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %16 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_7 = tensor.extract_slice %10[0, %arg3, 0, 0, 0, 0] [2, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x8x4x8x4x8xi32> to tensor<2x1x4x8x4x8xi32>
%pack_8 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_7 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_9 = tensor.extract_slice %extracted_slice_0[%15, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%extracted_slice_10 = tensor.extract_slice %8[%arg3, 0, 0, 0] [1, 2, 32, 32] [1, 1, 1, 1] : tensor<8x2x32x32xi32> to tensor<1x2x32x32xi32>
%alloc_11 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%extracted_slice_13 = tensor.extract_slice %11[%arg3, 0, 0, 0, 0, 0] [1, 2, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<8x2x8x4x8x4xi32> to tensor<1x2x8x4x8x4xi32>
%pack_14 = tensor.pack %pack_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_8, %pack_14 : tensor<2x1x4x8x4x8xi32>, tensor<1x2x8x4x8x4xi32>) outs(%arg4 : tensor<2x2x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_15: i32, %out: i32):
%20 = arith.muli %in, %in_15 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<2x2x8x8x4x4xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_15 = tensor.extract_slice %pack_8[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_14[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%extracted_slice_17 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_15, %extracted_slice_16 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_17 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%21 = arith.muli %in, %in_18 : i32
%22 = arith.addi %out, %21 : i32
linalg.yield %22 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %20 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_6 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_11 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %9 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
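// AMDAIETileAndFuse has wrapped the packed generic in a new scf.forall over (2, 2) with
// #gpu.thread mapping, so each (y, x) iteration owns one 1x1x8x8x4x4 slice of the
// accumulator; the original full-size generic (%18) is left dead for the cleanup pass to
// erase. A minimal sketch of the shared_outs / parallel_insert_slice idiom (hypothetical
// function with a no-op body; the real loop computes its slice before reinserting it):
func.func @thread_tiles_sketch(%init: tensor<2x2xi32>) -> tensor<2x2xi32> {
  %0 = scf.forall (%i, %j) in (2, 2) shared_outs(%o = %init) -> (tensor<2x2xi32>) {
    %slice = tensor.extract_slice %o[%i, %j] [1, 1] [1, 1] : tensor<2x2xi32> to tensor<1x1xi32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %slice into %o[%i, %j] [1, 1] [1, 1] : tensor<1x1xi32> into tensor<2x2xi32>
    }
  } {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
  return %0 : tensor<2x2xi32>
}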
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_8 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack_6[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%extracted_slice_12 = tensor.extract_slice %pack_10[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%extracted_slice_13 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_13 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%18 = arith.muli %in, %in_14 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_8 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
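// The cleanup pass has erased the dead full-size generic and applied the usual
// empty-tensor folds: slicing a tensor.empty now just materializes a smaller
// tensor.empty (%10, %11), and the unused 2x8x... / 8x2x... staging empties are gone.
// A minimal sketch of that fold (hypothetical functions showing the before/after shapes):
func.func @before_fold_sketch(%k: index) -> tensor<2x1x32x32xi32> {
  %e = tensor.empty() : tensor<2x8x32x32xi32>
  %s = tensor.extract_slice %e[0, %k, 0, 0] [2, 1, 32, 32] [1, 1, 1, 1] : tensor<2x8x32x32xi32> to tensor<2x1x32x32xi32>
  return %s : tensor<2x1x32x32xi32>
}
func.func @after_fold_sketch() -> tensor<2x1x32x32xi32> {
  // extract_slice of tensor.empty canonicalizes to a fresh, smaller tensor.empty.
  %s = tensor.empty() : tensor<2x1x32x32xi32>
  return %s : tensor<2x1x32x32xi32>
}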
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_8 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack_6[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%extracted_slice_12 = tensor.extract_slice %pack_10[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%extracted_slice_13 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_13 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%18 = arith.muli %in, %in_14 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_8 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
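// Canonicalization and CSE change nothing at this point (this dump matches the previous
// one). The K loop still runs %arg3 = 0..7 and converts the iteration number to an
// element offset with the affine map below; a minimal sketch of that address arithmetic
// (hypothetical function):
func.func @k_offset_sketch(%iv: index) -> index {
  // Iteration %iv selects columns [%iv * 32, %iv * 32 + 32) of the 256-wide K
  // dimension: 8 iterations x 32 columns = 256.
  %0 = affine.apply affine_map<(d0) -> (d0 * 32)>(%iv)
  return %0 : index
}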
// -----// IR Dump Before AMDAIEFusePackIntoLoop (iree-amdaie-fuse-pack-into-loop) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_8 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack_6[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%extracted_slice_12 = tensor.extract_slice %pack_10[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%extracted_slice_13 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_11, %extracted_slice_12 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_13 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%18 = arith.muli %in, %in_14 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_8 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
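// Between this dump and the next, AMDAIEFusePackIntoLoop sinks the second-level packs
// into the thread forall: each (y, x) iteration re-packs just its own 1x1x32x32 block
// (%pack_13, %pack_17 below) instead of consuming a slice of the hoisted whole-tile
// packs. A minimal sketch of that per-block pack for the A side (hypothetical function,
// same attributes as %pack_13 in the next dump):
func.func @pack_l1_sketch(%src: tensor<1x1x32x32xi32>) -> tensor<1x1x4x8x4x8xi32> {
  %dest = tensor.empty() : tensor<1x1x4x8x4x8xi32>
  // inner_tiles = [4, 8] cuts the 32x32 block into 4x8 micro-tiles (an 8x4 grid of
  // them); outer_dims_perm = [0, 1, 3, 2] then swaps the grid dims to 4x8.
  %0 = tensor.pack %src outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %dest : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
  return %0 : tensor<1x1x4x8x4x8xi32>
}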
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%pack_6 = tensor.pack %pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %10 : tensor<2x1x32x32xi32> -> tensor<2x1x4x8x4x8xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_8 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_8 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_7 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%pack_10 = tensor.pack %pack_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %11 : tensor<1x2x32x32xi32> -> tensor<1x2x8x4x8x4xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %10[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%pack_13 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_12 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_14 = tensor.extract_slice %pack_6[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%extracted_slice_15 = tensor.extract_slice %pack_9[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %11[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%pack_17 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_16 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %pack_10[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%18 = arith.muli %in, %in_20 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_8 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
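// The canonicalizer then deletes what fusion left dead: the hoisted second-level packs
// (%pack_6, %pack_10) and their slices disappear in the next dump, leaving only the
// per-thread packs feeding the generic. After the K loop, the accumulator is unpacked in
// two steps that mirror the input packing; a minimal sketch of the outer (flattening)
// unpack (hypothetical function, same attributes as %unpack_3 above):
func.func @unpack_sketch(%src: tensor<2x2x32x32xi32>, %dest: tensor<64x64xi32>) -> tensor<64x64xi32> {
  // Tile (i0, j0, ii, jj) scatters back to element (i0 * 32 + ii, j0 * 32 + jj).
  %0 = tensor.unpack %src inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %dest : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
  return %0 : tensor<64x64xi32>
}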
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_7 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_9 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %10[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%pack_11 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_10 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_12 = tensor.extract_slice %pack_8[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_13 = tensor.extract_slice %11[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%pack_14 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_15 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_11, %pack_14 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_15 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%18 = arith.muli %in, %in_16 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_7 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
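// CSE again finds nothing to merge, so the next dump repeats this state. Note the two
// scf.forall forms in play: the thread loop uses the normalized `in (2, 2)` form, while
// the outer block loop spells out bounds and steps to walk the 128x128 output in 64x64
// tiles. A minimal sketch of the block-level form (hypothetical function with a no-op
// body standing in for the real tile computation):
func.func @block_tiles_sketch(%init: tensor<128x128xi32>) -> tensor<128x128xi32> {
  %0 = scf.forall (%i, %j) = (0, 0) to (128, 128) step (64, 64) shared_outs(%o = %init) -> (tensor<128x128xi32>) {
    %tile = tensor.extract_slice %o[%i, %j] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %tile into %o[%i, %j] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
    }
  } {mapping = [#gpu.block<y>, #gpu.block<x>]}
  return %0 : tensor<128x128xi32>
}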
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_7 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_9 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %10[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%pack_11 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %extracted_slice_10 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_12 = tensor.extract_slice %pack_8[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_13 = tensor.extract_slice %11[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%pack_14 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %extracted_slice_13 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_15 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_11, %pack_14 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_15 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%18 = arith.muli %in, %in_16 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_7 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
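// Note: between the dump above and the one below, AMDAIEBufferizeToAllocation
// gives each core-level pack destination its own buffer: the packed A and B
// tiles now target fresh memref.alloc's in memory space 2 (%alloc_11 and
// %alloc_15 below) instead of slices of tensor.empty. The pack itself is just
// a tiled relayout; a minimal NumPy sketch of the [4, 8] pack used for the A
// tile, assuming standard tensor.pack semantics (reshape into tiles, then
// permute):

import numpy as np

# One 32x32 tile, as in the tensor<1x1x32x32xi32> slices above.
src = np.arange(32 * 32, dtype=np.int32).reshape(1, 1, 32, 32)

# inner_dims_pos = [2, 3], inner_tiles = [4, 8]: dim 2 splits into 8 tiles of 4,
# dim 3 into 4 tiles of 8. Axes after the reshape: (b0, b1, o2, t2, o3, t3).
# outer_dims_perm = [0, 1, 3, 2] then swaps the two tile-grid dims (o2 <-> o3).
packed = src.reshape(1, 1, 8, 4, 4, 8).transpose(0, 1, 4, 2, 3, 5)
assert packed.shape == (1, 1, 4, 8, 4, 8)  # matches tensor<1x1x4x8x4x8xi32>

# The matching unpack: invert the permutation, then merge the tiles back.
restored = packed.transpose(0, 1, 3, 4, 2, 5).reshape(1, 1, 32, 32)
assert np.array_equal(restored, src)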
// -----// IR Dump Before HoistStaticallyBoundAllocations (iree-hoist-statically-bound-allocations) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_0 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_1 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%alloc = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%7 = bufferization.to_tensor %alloc restrict writable : memref<2x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%8 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_4 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%14 = bufferization.to_tensor %alloc_5 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_6 = tensor.extract_slice %extracted_slice_0[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%alloc_7 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%15 = bufferization.to_tensor %alloc_7 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_8 = tensor.pack %extracted_slice_6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_9 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %10[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%alloc_11 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%17 = bufferization.to_tensor %alloc_11 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_12 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_13 = tensor.extract_slice %pack_8[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_14 = tensor.extract_slice %11[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%alloc_15 = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%18 = bufferization.to_tensor %alloc_15 restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_17 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_16 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_17 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%20 = arith.muli %in, %in_18 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
memref.dealloc %alloc_11 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc_15 : memref<1x1x8x4x8x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_7 : memref<1x2x32x32xi32, 1 : i32>
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_3 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_1 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
memref.dealloc %alloc : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x2x8x8x4x4xi32, 2 : i32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_3 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
return
}
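// Note: the next pass, HoistStaticallyBoundAllocations, moves all six
// statically sized memref.alloc's to the function entry and sinks the matching
// deallocs to just before the return (visible in the dump below), so a single
// buffer per shape is reused across every loop and forall iteration. A minimal
// Python sketch of the idea, with np.empty standing in for memref.alloc:

import numpy as np

def per_iteration_alloc():
    for k in range(8):
        scratch = np.empty((2, 1, 32, 32), np.int32)  # fresh buffer per iteration
        scratch[...] = k                              # stand-in for the pack/copy work

def hoisted_alloc():
    scratch = np.empty((2, 1, 32, 32), np.int32)      # one buffer at function entry
    for k in range(8):
        scratch[...] = k                              # same buffer reused each iteration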
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%c1 = arith.constant 1 : index
%c8 = arith.constant 8 : index
%c0_i32 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = tensor.empty() : tensor<2x1x4x8x4x8xi32>
%11 = tensor.empty() : tensor<1x2x8x4x8x4xi32>
%12 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %10[%arg5, 0, 0, 0, 0, 0] [1, 1, 4, 8, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<2x1x4x8x4x8xi32> to tensor<1x1x4x8x4x8xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_13 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_14 = tensor.extract_slice %pack_10[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_15 = tensor.extract_slice %11[0, %arg6, 0, 0, 0, 0] [1, 1, 8, 4, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x2x8x4x8x4xi32> to tensor<1x1x8x4x8x4xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_17 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_16 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_17 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%20 = arith.muli %in, %in_18 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_7 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
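// Note: the canonicalization between this dump and the next deletes the
// now-dead tensor.empty pack destinations (%10 and %11 above) and their
// extract_slice's, since every pack already writes into a
// bufferization.to_tensor of a real allocation. For reference, the 9-d
// linalg.generic shared by all of these dumps is the packed micro-matmul; a
// minimal NumPy interpretation of its indexing maps, with loop bounds read off
// the operand shapes (d2, d5, d8 are the reduction dims):

import itertools
import numpy as np

A = np.random.randint(-4, 4, size=(1, 1, 4, 8, 4, 8)).astype(np.int32)
B = np.random.randint(-4, 4, size=(1, 1, 8, 4, 8, 4)).astype(np.int32)
C = np.zeros((1, 1, 8, 8, 4, 4), dtype=np.int32)

for d0, d1, d2, d3, d4, d5, d6, d7, d8 in itertools.product(
        range(1), range(1), range(1), range(8), range(8),
        range(4), range(4), range(4), range(8)):
    C[d0, d1, d4, d3, d6, d7] += A[d0, d2, d5, d3, d6, d8] * B[d2, d1, d4, d5, d8, d7]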
// -----// IR Dump Before CSE (cse) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%11 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, %11] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %12 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[%11, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%13 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%14 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%15 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_12 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %15 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_13 = tensor.extract_slice %pack_10[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_14 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_15 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_14 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_15 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%18 = arith.muli %in, %in_16 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %14 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_7 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
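// Note: CSE finds nothing further to share here (the dump below is identical
// to the one above; the preceding canonicalization already cleaned up). The
// next pass, AMDAIEPeelForLoop, splits the k-loop so its first and last
// iterations can be specialized.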
// -----// IR Dump Before AMDAIEPeelForLoop (iree-amdaie-peel-for-loop) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%10 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%11 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_8 = tensor.extract_slice %extracted_slice[0, %11] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%12 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_8 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %12 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_9 = tensor.extract_slice %extracted_slice_5[%11, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%13 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_10 = tensor.pack %extracted_slice_9 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%14 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_11 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%15 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_12 = tensor.pack %extracted_slice_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %15 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_13 = tensor.extract_slice %pack_10[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_14 = tensor.pack %extracted_slice_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_15 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%17 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_12, %pack_14 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_15 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%18 = arith.muli %in, %in_16 : i32
%19 = arith.addi %out, %18 : i32
linalg.yield %19 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %17 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %14 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_7 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_7 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
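// Note: the peeling is visible in the dump below: the single scf.for over
// k = 0..8 becomes three loops over [0, 1), [1, 7), and [7, 8), each with the
// same body. A minimal Python sketch of the control-flow change:

def run_k_loop(body):
    body(0)                 # peeled first iteration (k * 32 = 0)
    for k in range(1, 7):   # steady state
        body(k)
    body(7)                 # peeled last iteration (k * 32 = 224)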
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%c1_7 = arith.constant 1 : index
%10 = scf.for %arg3 = %c0 to %c1_7 step %c1 iter_args(%arg4 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_12 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_14 = tensor.extract_slice %pack_11[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_16 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_16 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%20 = arith.muli %in, %in_17 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%c7 = arith.constant 7 : index
%11 = scf.for %arg3 = %c1_7 to %c7 step %c1 iter_args(%arg4 = %10) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_12 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_14 = tensor.extract_slice %pack_11[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_16 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_16 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%20 = arith.muli %in, %in_17 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%12 = scf.for %arg3 = %c7 to %c8 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%13 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_9 = tensor.extract_slice %extracted_slice[0, %13] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_10 = tensor.extract_slice %extracted_slice_5[%13, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_12 = tensor.extract_slice %pack[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_14 = tensor.extract_slice %pack_11[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_16 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_13, %pack_15 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_16 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%20 = arith.muli %in, %in_17 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %16 : tensor<2x2x8x8x4x4xi32>
}
%unpack = tensor.unpack %12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_8 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_8 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
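// Note: the canonicalization between this dump and the next inlines the two
// single-iteration peeled loops and folds affine_map<(d0) -> (d0 * 32)> at
// their fixed induction values: 0 * 32 = 0 for the prologue and 7 * 32 = 224
// for the epilogue, which is why the dump below slices at constant offsets
// [0, 0] and [0, 224] / [224, 0] instead of applying the map.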
// -----// IR Dump Before AMDAIEFuseFillIntoForall (iree-amdaie-fuse-fill-into-forall) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%12 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %9) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_15 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_17 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%20 = arith.muli %in, %in_20 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%13 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %12) -> (tensor<2x2x8x8x4x4xi32>) {
%17 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_15 = tensor.extract_slice %extracted_slice[0, %17] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_16 = tensor.pack %extracted_slice_15 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_17 = tensor.extract_slice %extracted_slice_5[%17, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %19 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%20 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_19 = tensor.extract_slice %pack_16[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_20 = tensor.pack %extracted_slice_19 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_21 = tensor.extract_slice %pack_18[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_22 = tensor.pack %extracted_slice_21 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %22 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_23 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_20, %pack_22 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_23 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_24: i32, %out: i32):
%24 = arith.muli %in, %in_24 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %23 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %20 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_15 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_17 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%20 = arith.muli %in, %in_20 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_14 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_14 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
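// annotation (not part of the compiler dump): the 9-D linalg.generic above is
// the packed matmul. With iterator_types [parallel, parallel, reduction,
// parallel, parallel, reduction, parallel, parallel, reduction], d2/d5/d8 are
// the three reduction (K) levels, and the indexing maps read as
//   C[d0, d1, d4, d3, d6, d7] += A[d0, d2, d5, d3, d6, d8] * B[d2, d1, d4, d5, d8, d7],
// i.e. each 32x32 output tile is computed as an 8x8 grid of 4x4 micro-tiles.
// Below is a minimal sketch of the two pack levels used throughout this file;
// @pack_levels_sketch is a hypothetical standalone func, not produced by the
// pipeline, assuming upstream tensor.pack semantics.
func.func @pack_levels_sketch(%a: tensor<64x32xi32>) -> tensor<1x1x4x8x4x8xi32> {
  // First level: split a 64x32 operand into 32x32 tiles, laid out 2x1x32x32.
  %l2 = tensor.empty() : tensor<2x1x32x32xi32>
  %p1 = tensor.pack %a inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %l2 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
  // Second level: re-tile one 32x32 tile into 4x8 micro-tiles;
  // outer_dims_perm = [0, 1, 3, 2] swaps the two tile-grid dims, so the
  // pre-permutation outer shape [1, 1, 8, 4] becomes [1, 1, 4, 8].
  %t = tensor.extract_slice %p1[0, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
  %l1 = tensor.empty() : tensor<1x1x4x8x4x8xi32>
  %p2 = tensor.pack %t outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %l1 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
  return %p2 : tensor<1x1x4x8x4x8xi32>
}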
// -----// IR Dump Before AMDAIEFuseConsumerIntoLoop (iree-amdaie-fuse-consumer-into-loop) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%12 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_15 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_17 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_19 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_20 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_21: i32, %out: i32):
%21 = arith.muli %in, %in_21 : i32
%22 = arith.addi %out, %21 : i32
linalg.yield %22 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %20 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
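    // annotation: this scf.forall over (2, 2) with mapping
    // [#gpu.thread<y>, #gpu.thread<x>] distributes the four 32x32 tiles of
    // the 64x64 workgroup tile, presumably across a 2x2 grid of AIE cores;
    // the enclosing scf.forall with #gpu.block mapping distributes the 64x64
    // tiles across the 128x128 problem.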
%13 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %12) -> (tensor<2x2x8x8x4x4xi32>) {
%17 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_15 = tensor.extract_slice %extracted_slice[0, %17] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_16 = tensor.pack %extracted_slice_15 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_17 = tensor.extract_slice %extracted_slice_5[%17, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %19 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%20 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_19 = tensor.extract_slice %pack_16[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_20 = tensor.pack %extracted_slice_19 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_21 = tensor.extract_slice %pack_18[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_22 = tensor.pack %extracted_slice_21 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %22 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_23 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_20, %pack_22 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_23 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_24: i32, %out: i32):
%24 = arith.muli %in, %in_24 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %23 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %20 : tensor<2x2x8x8x4x4xi32>
}
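    // annotation: the scf.for above walks the six middle K tiles
    // (%arg3 = 1..6, i.e. column offsets 32 through 192 via
    // affine_map<(d0) -> (d0 * 32)>). The first K tile (offset 0) is peeled
    // before the loop so the zero-fill of the accumulator can be attached to
    // it, and the last K tile (offset 224) is peeled after it so the final
    // unpack can consume the result.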
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %13) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_15 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_16 = tensor.pack %extracted_slice_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_17 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_18 = tensor.pack %extracted_slice_17 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_16, %pack_18 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%20 = arith.muli %in, %in_20 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %7 : tensor<2x2x8x8x4x4xi32> -> tensor<2x2x32x32xi32>
%unpack_14 = tensor.unpack %unpack inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_14 into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
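// annotation: the effect of iree-amdaie-fuse-consumer-into-loop is visible in
// the next dump: the trailing tensor.unpack is pulled into the last
// thread-mapped scf.forall, which then carries a second shared_out
// (%arg6 = %7, the 2x2x32x32 buffer) and yields both the packed accumulator
// and the unpacked 32x32 tiles.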
// -----// IR Dump Before AMDAIEFusePackIntoLoop (iree-amdaie-fuse-pack-into-loop) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%12 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%21 = arith.muli %in, %in_20 : i32
%22 = arith.addi %out, %21 : i32
linalg.yield %22 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %20 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%13 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %12) -> (tensor<2x2x8x8x4x4xi32>) {
%17 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %17] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%17, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %19 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%20 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %22 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_19, %pack_21 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_22 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_23: i32, %out: i32):
%24 = arith.muli %in, %in_23 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %23 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %20 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %13, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%20 = arith.muli %in, %in_22 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
%inserted_slice = tensor.insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_20 = tensor.extract_slice %inserted_slice[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%unpack_21 = tensor.unpack %19 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_21 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %16#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
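// annotation: iree-amdaie-fuse-pack-into-loop presumably moves tensor.pack
// producers into the loop nests that consume them; in this trace the packs
// already sit inside the scf.for / scf.forall bodies, and the next dump
// appears unchanged. For reference, a minimal sketch of the inverse op used
// on the output side (hypothetical standalone func, not from the pipeline):
func.func @unpack_sketch(%acc: tensor<1x1x8x8x4x4xi32>, %dst: tensor<1x1x32x32xi32>) -> tensor<1x1x32x32xi32> {
  // Reassemble a 32x32 tile from its 8x8 grid of 4x4 micro-tiles, undoing
  // the outer_dims_perm = [0, 1, 3, 2] transpose applied when packing.
  %u = tensor.unpack %acc outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %dst : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
  return %u : tensor<1x1x32x32xi32>
}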
// -----// IR Dump Before AMDAIEBufferizeToAllocation (iree-amdaie-bufferize-to-allocation) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%12 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%21 = arith.muli %in, %in_20 : i32
%22 = arith.addi %out, %21 : i32
linalg.yield %22 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %20 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%13 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %12) -> (tensor<2x2x8x8x4x4xi32>) {
%17 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %17] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%17, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %19 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%20 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %22 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_19, %pack_21 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_22 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_23: i32, %out: i32):
%24 = arith.muli %in, %in_23 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %23 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %20 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %13, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%20 = arith.muli %in, %in_22 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
%inserted_slice = tensor.insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_20 = tensor.extract_slice %inserted_slice[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%unpack_21 = tensor.unpack %19 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_21 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %16#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
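// annotation: every intermediate in this dump is already tied to a
// preallocated memref via bufferization.to_tensor ... restrict writable, so
// iree-amdaie-bufferize-to-allocation appears to leave the IR unchanged (the
// next dump is identical). The address spaces presumably follow the AIE
// memory hierarchy, with 1 : i32 denoting memtile (L2) buffers and 2 : i32
// denoting core-local (L1) buffers; that reading is an assumption, not
// stated in the dump itself.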
// -----// IR Dump Before AMDAIELowerToUKernels (iree-amdaie-lower-to-ukernels) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%9 = linalg.fill ins(%c0_i32 : i32) outs(%8 : tensor<2x2x8x8x4x4xi32>) -> tensor<2x2x8x8x4x4xi32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%10 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%11 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %11 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%12 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%20 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%19 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%21 = arith.muli %in, %in_20 : i32
%22 = arith.addi %out, %21 : i32
linalg.yield %22 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %20 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%13 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %12) -> (tensor<2x2x8x8x4x4xi32>) {
%17 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %17] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%18 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%17, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%19 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %19 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%20 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%22 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %22 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%23 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_19, %pack_21 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_22 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_23: i32, %out: i32):
%24 = arith.muli %in, %in_23 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %23 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %20 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%14 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%15 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %15 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%16:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %13, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%18 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %18 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%20 = arith.muli %in, %in_22 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
%inserted_slice = tensor.insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%extracted_slice_20 = tensor.extract_slice %inserted_slice[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%unpack_21 = tensor.unpack %19 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_21 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %16#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before AMDAIECleanup (iree-amdaie-cleanup) //----- //
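// NOTE: in the function below the K = 256 reduction is already split into three
// phases: a peeled first K-tile (which carries the linalg.fill that zero-initializes
// the accumulator), a steady-state scf.for over K-tiles 1..6 (%c1 to %c7, offset
// %arg3 * 32), and a peeled last K-tile at offset 224 whose scf.forall additionally
// tensor.unpack's the accumulator into the 2x2x32x32 L2 buffer and from there into
// the 64x64 output tile.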
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%20 = arith.muli %in, %in_19 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%12 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%22 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_19, %pack_21 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_22 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_23: i32, %out: i32):
%23 = arith.muli %in, %in_23 : i32
%24 = arith.addi %out, %23 : i32
linalg.yield %24 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_21: i32, %out: i32):
%19 = arith.muli %in, %in_21 : i32
%20 = arith.addi %out, %19 : i32
linalg.yield %20 : i32
} -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_20 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_20 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
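// The two tensor.pack levels above define the data movement through the memory
// hierarchy: the first level tiles a 64x32 (resp. 32x64) slice into a 2x1x32x32
// (resp. 1x2x32x32) buffer in memory space 1, and the second level retiles each
// 32x32 block into the 1x1x4x8x4x8 / 1x1x8x4x8x4 layouts in memory space 2 that
// the micro-kernel consumes. A minimal NumPy sketch of the same reshuffles for the
// A-side (the NumPy framing is illustrative only, not part of the pass pipeline):

import numpy as np

A = np.arange(64 * 32, dtype=np.int32).reshape(64, 32)

# Level 1: tensor.pack inner_dims_pos = [0, 1] inner_tiles = [32, 32],
# tensor<64x32xi32> -> tensor<2x1x32x32xi32>.
A_l2 = A.reshape(2, 32, 1, 32).transpose(0, 2, 1, 3)
assert A_l2.shape == (2, 1, 32, 32)

# Level 2: tensor.pack outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3]
# inner_tiles = [4, 8], tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>.
blk = A_l2[0:1, 0:1]                  # one 1x1x32x32 block
t = blk.reshape(1, 1, 8, 4, 4, 8)     # split rows 32 -> 8x4, cols 32 -> 4x8
A_l1 = t.transpose(0, 1, 4, 2, 3, 5)  # apply outer_dims_perm = [0, 1, 3, 2]
assert A_l1.shape == (1, 1, 4, 8, 4, 8)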
// -----// IR Dump Before AMDAIEInsertLoopsForVectorization (iree-amdaie-insert-loops-for-vectorization) //----- //
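// NOTE: this pass rewrites each packed linalg.generic into an explicit scf.for
// nest over the outer unit and tile dimensions, leaving a single
// 4x8 * 8x4 -> 4x4 micro-tile computation in the innermost body; compare the
// function below (the pass input) with the next dump (its output).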
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c7 = arith.constant 7 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%19 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%20 = arith.muli %in, %in_19 : i32
%21 = arith.addi %out, %20 : i32
linalg.yield %21 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%12 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%22 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_19, %pack_21 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_22 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_23: i32, %out: i32):
%23 = arith.muli %in, %in_23 : i32
%24 = arith.addi %out, %23 : i32
linalg.yield %24 : i32
} -> tensor<1x1x8x8x4x4xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%pack_15, %pack_17 : tensor<1x1x4x8x4x8xi32>, tensor<1x1x8x4x8x4xi32>) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_21: i32, %out: i32):
%19 = arith.muli %in, %in_21 : i32
%20 = arith.addi %out, %19 : i32
linalg.yield %20 : i32
} -> tensor<1x1x8x8x4x4xi32>
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_20 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_20 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
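// The 9-D linalg.generic above is the packed 32x32x32 matmul: reading the loop
// indices as (d0..d8) = (m0, n0, k0, m1, n1, k1, m2, n2, k2), its three affine
// maps access A[m0, k0, k1, m1, m2, k2] and B[k0, n0, n1, k1, k2, n2] and update
// C[m0, n0, n1, m1, m2, n2], reducing over k0, k1, k2. The same contraction as a
// NumPy einsum, with a flat-matmul cross-check (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
# Packed operand layouts, read off the IR:
#   A: 1x1x4x8x4x8 = (m0, k0, k1, m1, m2, k2)
#   B: 1x1x8x4x8x4 = (k0, n0, n1, k1, k2, n2)
#   C: 1x1x8x8x4x4 = (m0, n0, n1, m1, m2, n2)
A = rng.integers(-8, 8, size=(1, 1, 4, 8, 4, 8), dtype=np.int32)
B = rng.integers(-8, 8, size=(1, 1, 8, 4, 8, 4), dtype=np.int32)

# The generic's indexing maps as one einsum
# (a=m0, b=n0, c=k0, d=m1, e=n1, f=k1, g=m2, h=n2, i=k2):
C = np.einsum('acfdgi,cbefih->abedgh', A, B)
assert C.shape == (1, 1, 8, 8, 4, 4)

# Unpacking all three operands back to flat 32x32 recovers an ordinary matmul.
flat = lambda t: t.transpose(0, 3, 4, 1, 2, 5).reshape(32, 32)
assert (flat(C) == flat(A) @ flat(B)).all()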
// -----// IR Dump Before AMDAIEVectorization (iree-amdaie-vectorization) //----- //
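// NOTE: the scf.for nests introduced by the previous pass materialize their
// bounds as fresh arith.constant ops, hence the runs of duplicated
// %c0_* / %c1_* / %c4 / %c8 values below; they are folded into single constants
// by the time of the next dump.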
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c1 = arith.constant 1 : index
%c1_0 = arith.constant 1 : index
%c1_1 = arith.constant 1 : index
%c1_2 = arith.constant 1 : index
%c1_3 = arith.constant 1 : index
%c1_4 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_5 = arith.constant 0 : index
%c0_6 = arith.constant 0 : index
%c1_7 = arith.constant 1 : index
%c1_8 = arith.constant 1 : index
%c1_9 = arith.constant 1 : index
%c1_10 = arith.constant 1 : index
%c1_11 = arith.constant 1 : index
%c1_12 = arith.constant 1 : index
%c0_13 = arith.constant 0 : index
%c0_14 = arith.constant 0 : index
%c0_15 = arith.constant 0 : index
%c1_16 = arith.constant 1 : index
%c1_17 = arith.constant 1 : index
%c1_18 = arith.constant 1 : index
%c1_19 = arith.constant 1 : index
%c1_20 = arith.constant 1 : index
%c1_21 = arith.constant 1 : index
%c0_22 = arith.constant 0 : index
%c0_23 = arith.constant 0 : index
%c0_24 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_25 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c1_26 = arith.constant 1 : index
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_27 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_28 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_29 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_30 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_31 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0_25) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0_25) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0_25) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_32 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_33 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_31 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_30 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_34 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_29 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_34 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_35 = tensor.extract_slice %extracted_slice_32[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_28 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_36 = tensor.pack %extracted_slice_35 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_41 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_27 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_42 = tensor.pack %extracted_slice_41 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_43 = tensor.extract_slice %pack_36[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_44 = tensor.pack %extracted_slice_43 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_45 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_45 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%c0_46 = arith.constant 0 : index
%c1_47 = arith.constant 1 : index
%c1_48 = arith.constant 1 : index
%c0_49 = arith.constant 0 : index
%c1_50 = arith.constant 1 : index
%c1_51 = arith.constant 1 : index
%c0_52 = arith.constant 0 : index
%c1_53 = arith.constant 1 : index
%c1_54 = arith.constant 1 : index
%c0_55 = arith.constant 0 : index
%c8 = arith.constant 8 : index
%c1_56 = arith.constant 1 : index
%c0_57 = arith.constant 0 : index
%c8_58 = arith.constant 8 : index
%c1_59 = arith.constant 1 : index
%c0_60 = arith.constant 0 : index
%c4 = arith.constant 4 : index
%c1_61 = arith.constant 1 : index
%19 = scf.for %arg6 = %c0_46 to %c1_47 step %c1_48 iter_args(%arg7 = %18) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg8 = %c0_49 to %c1_50 step %c1_51 iter_args(%arg9 = %arg7) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg10 = %c0_52 to %c1_53 step %c1_54 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg12 = %c0_55 to %c8 step %c1_56 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg14 = %c0_57 to %c8_58 step %c1_59 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg16 = %c0_60 to %c4 step %c1_61 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_62 = tensor.extract_slice %pack_42[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_63 = tensor.extract_slice %pack_44[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_64 = tensor.extract_slice %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%25 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_62, %extracted_slice_63 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_64 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_65: i32, %out: i32):
%26 = arith.muli %in, %in_65 : i32
%27 = arith.addi %out, %26 : i32
linalg.yield %27 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %25 into %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%12 = scf.for %arg3 = %c1_26 to %c7 step %c1_26 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_41 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_29 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_42 = tensor.pack %extracted_slice_41 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_43 = tensor.extract_slice %extracted_slice_32[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_28 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_44 = tensor.pack %extracted_slice_43 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_45 = tensor.extract_slice %pack_42[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_27 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_46 = tensor.pack %extracted_slice_45 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_47 = tensor.extract_slice %pack_44[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_48 = tensor.pack %extracted_slice_47 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_49 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%c0_50 = arith.constant 0 : index
%c1_51 = arith.constant 1 : index
%c1_52 = arith.constant 1 : index
%c0_53 = arith.constant 0 : index
%c1_54 = arith.constant 1 : index
%c1_55 = arith.constant 1 : index
%c0_56 = arith.constant 0 : index
%c1_57 = arith.constant 1 : index
%c1_58 = arith.constant 1 : index
%c0_59 = arith.constant 0 : index
%c8 = arith.constant 8 : index
%c1_60 = arith.constant 1 : index
%c0_61 = arith.constant 0 : index
%c8_62 = arith.constant 8 : index
%c1_63 = arith.constant 1 : index
%c0_64 = arith.constant 0 : index
%c4 = arith.constant 4 : index
%c1_65 = arith.constant 1 : index
%22 = scf.for %arg8 = %c0_50 to %c1_51 step %c1_52 iter_args(%arg9 = %extracted_slice_49) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg10 = %c0_53 to %c1_54 step %c1_55 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg12 = %c0_56 to %c1_57 step %c1_58 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%25 = scf.for %arg14 = %c0_59 to %c8 step %c1_60 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%26 = scf.for %arg16 = %c0_61 to %c8_62 step %c1_63 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%27 = scf.for %arg18 = %c0_64 to %c4 step %c1_65 iter_args(%arg19 = %arg17) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_66 = tensor.extract_slice %pack_46[%arg8, %arg12, %arg18, %arg14, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_67 = tensor.extract_slice %pack_48[%arg12, %arg10, %arg16, %arg18, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_68 = tensor.extract_slice %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%28 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_66, %extracted_slice_67 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_68 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_69: i32, %out: i32):
%29 = arith.muli %in, %in_69 : i32
%30 = arith.addi %out, %29 : i32
linalg.yield %30 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %28 into %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %27 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %26 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %25 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_37 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_29 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_38 = tensor.pack %extracted_slice_37 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_39 = tensor.extract_slice %extracted_slice_32[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_28 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_40 = tensor.pack %extracted_slice_39 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_41 = tensor.extract_slice %pack_38[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_27 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_42 = tensor.pack %extracted_slice_41 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_43 = tensor.extract_slice %pack_40[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_44 = tensor.pack %extracted_slice_43 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_45 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%c0_46 = arith.constant 0 : index
%c1_47 = arith.constant 1 : index
%c1_48 = arith.constant 1 : index
%c0_49 = arith.constant 0 : index
%c1_50 = arith.constant 1 : index
%c1_51 = arith.constant 1 : index
%c0_52 = arith.constant 0 : index
%c1_53 = arith.constant 1 : index
%c1_54 = arith.constant 1 : index
%c0_55 = arith.constant 0 : index
%c8 = arith.constant 8 : index
%c1_56 = arith.constant 1 : index
%c0_57 = arith.constant 0 : index
%c8_58 = arith.constant 8 : index
%c1_59 = arith.constant 1 : index
%c0_60 = arith.constant 0 : index
%c4 = arith.constant 4 : index
%c1_61 = arith.constant 1 : index
%18 = scf.for %arg7 = %c0_46 to %c1_47 step %c1_48 iter_args(%arg8 = %extracted_slice_45) -> (tensor<1x1x8x8x4x4xi32>) {
%19 = scf.for %arg9 = %c0_49 to %c1_50 step %c1_51 iter_args(%arg10 = %arg8) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg11 = %c0_52 to %c1_53 step %c1_54 iter_args(%arg12 = %arg10) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg13 = %c0_55 to %c8 step %c1_56 iter_args(%arg14 = %arg12) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg15 = %c0_57 to %c8_58 step %c1_59 iter_args(%arg16 = %arg14) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg17 = %c0_60 to %c4 step %c1_61 iter_args(%arg18 = %arg16) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_64 = tensor.extract_slice %pack_42[%arg7, %arg11, %arg17, %arg13, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_65 = tensor.extract_slice %pack_44[%arg11, %arg9, %arg15, %arg17, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_66 = tensor.extract_slice %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_64, %extracted_slice_65 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_66 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%25 = arith.muli %in, %in_67 : i32
%26 = arith.addi %out, %25 : i32
linalg.yield %26 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %24 into %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %19 : tensor<1x1x8x8x4x4xi32>
}
%extracted_slice_62 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_63 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_62 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_63 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_33 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_31 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_30 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_29 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_28 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_27 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
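// The six scf.for loops wrapped around each micro-kernel above iterate over
// (m0, n0, k0, m1, n1, k1) = (1, 1, 1, 8, 8, 4) with k1 innermost. A compact
// NumPy model of that schedule for one peeled iteration (the function name and
// in-place update are illustrative assumptions, not pipeline code):

import numpy as np

def micro_kernel_schedule(A, B, C):
    # A: (1,1,4,8,4,8), B: (1,1,8,4,8,4), C: (1,1,8,8,4,4); C is updated in place.
    # The three unit loops (m0, n0, k0) are elided since each has trip count 1.
    for m1 in range(8):            # the %c0-to-%c8 loop on the 4th induction var
        for n1 in range(8):        # the %c0-to-%c8 loop on the 5th induction var
            for k1 in range(4):    # the %c0-to-%c4 innermost loop
                a = A[0, 0, k1, m1]       # the 1x1x1x1x4x8 extract_slice
                b = B[0, 0, n1, k1]       # the 1x1x1x1x8x4 extract_slice
                C[0, 0, n1, m1] += a @ b  # the 1x1x1x1x4x4 accumulate
    return C

# Trivial runnable demo with zero operands:
C = micro_kernel_schedule(
    np.zeros((1, 1, 4, 8, 4, 8), dtype=np.int32),
    np.zeros((1, 1, 8, 4, 8, 4), dtype=np.int32),
    np.zeros((1, 1, 8, 8, 4, 4), dtype=np.int32),
)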
// -----// IR Dump Before EliminateEmptyTensors (iree-eliminate-empty-tensors) //----- //
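// NOTE: constants are now deduplicated, but the innermost micro-kernels still
// appear as linalg.generic in this dump, so vectorization appears to have left
// them in place here; this IR is the input to empty-tensor elimination ahead of
// bufferization.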
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = tensor.empty() : tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%19 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %18) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg16 = %c0 to %c4 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_19 = tensor.extract_slice %pack_15[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_21 = tensor.extract_slice %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%25 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_19, %extracted_slice_20 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_21 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%26 = arith.muli %in, %in_22 : i32
%27 = arith.addi %out, %26 : i32
linalg.yield %27 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %25 into %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
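// Note on the loop nest above: the three unit-trip scf.for loops (%c0 to %c1)
// are residual tiling loops, while the live loops walk an 8x8 grid of 4x4
// accumulator micro-tiles with an outer reduction loop of trip count 4. Each
// innermost step is a (4x8) x (8x4) -> (4x4) i32 micro-matmul on slices of
// the packed operands, a shape presumably chosen to line up with the AIE
// vector unit ahead of vectorization.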
%12 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%22 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %extracted_slice_22) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg12 = %c0 to %c1 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%25 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%26 = scf.for %arg16 = %c0 to %c8 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%27 = scf.for %arg18 = %c0 to %c4 step %c1 iter_args(%arg19 = %arg17) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_23 = tensor.extract_slice %pack_19[%arg8, %arg12, %arg18, %arg14, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_24 = tensor.extract_slice %pack_21[%arg12, %arg10, %arg16, %arg18, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_25 = tensor.extract_slice %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%28 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_23, %extracted_slice_24 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_25 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_26: i32, %out: i32):
%29 = arith.muli %in, %in_26 : i32
%30 = arith.addi %out, %29 : i32
linalg.yield %30 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %28 into %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %27 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %26 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %25 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = scf.for %arg7 = %c0 to %c1 step %c1 iter_args(%arg8 = %extracted_slice_18) -> (tensor<1x1x8x8x4x4xi32>) {
%19 = scf.for %arg9 = %c0 to %c1 step %c1 iter_args(%arg10 = %arg8) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg11 = %c0 to %c1 step %c1 iter_args(%arg12 = %arg10) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg13 = %c0 to %c8 step %c1 iter_args(%arg14 = %arg12) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg15 = %c0 to %c8 step %c1 iter_args(%arg16 = %arg14) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg17 = %c0 to %c4 step %c1 iter_args(%arg18 = %arg16) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_21 = tensor.extract_slice %pack_15[%arg7, %arg11, %arg17, %arg13, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_22 = tensor.extract_slice %pack_17[%arg11, %arg9, %arg15, %arg17, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_23 = tensor.extract_slice %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_21, %extracted_slice_22 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_23 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_24: i32, %out: i32):
%25 = arith.muli %in, %in_24 : i32
%26 = arith.addi %out, %25 : i32
linalg.yield %26 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %24 into %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %19 : tensor<1x1x8x8x4x4xi32>
}
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_20 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_20 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
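// Note on the structure above: the outer scf.forall distributes 64x64 output
// tiles over a 2x2 #gpu.block grid, an inner 2x2 #gpu.thread forall covers
// the 32x32 sub-tiles, and the K = 256 reduction is peeled over 32-wide K
// tiles: a prologue (K tile 0) that initializes the accumulator with
// linalg.fill, a main scf.for over K tiles 1..6 (%c1 to %c7), and an epilogue
// at K offset 224 that also tensor.unpack's the accumulator back to the
// 64x64 output tile.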
// -----// IR Dump Before EmptyTensorToAllocTensor (empty-tensor-to-alloc-tensor) //----- //
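// Note: empty-tensor-to-alloc-tensor rewrites ops like
//   %e = tensor.empty() : tensor<2x2x32x32xi32>
// into
//   %e = bufferization.alloc_tensor() : tensor<2x2x32x32xi32>
// so that one-shot bufferization materializes a fresh allocation for them.
// In the dump below the staging tensors are already bufferization.to_tensor
// views of explicit memref.allocs, so the pass appears to have nothing left
// to rewrite and the IR before the next pass is unchanged.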
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>> -> tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%19 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %18) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg16 = %c0 to %c4 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_19 = tensor.extract_slice %pack_15[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_21 = tensor.extract_slice %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%25 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_19, %extracted_slice_20 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_21 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%26 = arith.muli %in, %in_22 : i32
%27 = arith.addi %out, %26 : i32
linalg.yield %27 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %25 into %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%12 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%22 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %extracted_slice_22) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg12 = %c0 to %c1 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%25 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%26 = scf.for %arg16 = %c0 to %c8 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%27 = scf.for %arg18 = %c0 to %c4 step %c1 iter_args(%arg19 = %arg17) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_23 = tensor.extract_slice %pack_19[%arg8, %arg12, %arg18, %arg14, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_24 = tensor.extract_slice %pack_21[%arg12, %arg10, %arg16, %arg18, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_25 = tensor.extract_slice %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%28 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_23, %extracted_slice_24 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_25 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_26: i32, %out: i32):
%29 = arith.muli %in, %in_26 : i32
%30 = arith.addi %out, %29 : i32
linalg.yield %30 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %28 into %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %27 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %26 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %25 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = scf.for %arg7 = %c0 to %c1 step %c1 iter_args(%arg8 = %extracted_slice_18) -> (tensor<1x1x8x8x4x4xi32>) {
%19 = scf.for %arg9 = %c0 to %c1 step %c1 iter_args(%arg10 = %arg8) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg11 = %c0 to %c1 step %c1 iter_args(%arg12 = %arg10) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg13 = %c0 to %c8 step %c1 iter_args(%arg14 = %arg12) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg15 = %c0 to %c8 step %c1 iter_args(%arg16 = %arg14) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg17 = %c0 to %c4 step %c1 iter_args(%arg18 = %arg16) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_21 = tensor.extract_slice %pack_15[%arg7, %arg11, %arg17, %arg13, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_22 = tensor.extract_slice %pack_17[%arg11, %arg9, %arg15, %arg17, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_23 = tensor.extract_slice %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_21, %extracted_slice_22 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_23 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_24: i32, %out: i32):
%25 = arith.muli %in, %in_24 : i32
%26 = arith.addi %out, %25 : i32
linalg.yield %26 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %24 into %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %19 : tensor<1x1x8x8x4x4xi32>
}
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_20 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_20 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before IREEComprehensiveBufferize (iree-codegen-iree-comprehensive-bufferize) //----- //
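// Note: comprehensive bufferization moves the program from tensor SSA values
// onto memrefs; the effect is visible in the next dump. As a sketch, a pair
// like
//   %3 = flow.dispatch.tensor.load %0, ... -> tensor<128x256xi32>
//   %extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1]
// becomes
//   %0 = hal.interface.binding.subspan ... : memref<128x256xi32, ...>
//   %subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1]
// tensor.pack into a to_tensor'd alloc lowers to iree_linalg_ext.pack writing
// into that memref, and the scf.forall loops stop carrying shared_outs since
// all updates happen in place.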
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<128x256xi32>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<256x128xi32>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
%3 = flow.dispatch.tensor.load %0, offsets = [0, 0], sizes = [128, 256], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<128x256xi32>> -> tensor<128x256xi32>
%4 = flow.dispatch.tensor.load %1, offsets = [0, 0], sizes = [256, 128], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<256x128xi32>> -> tensor<256x128xi32>
%5 = flow.dispatch.tensor.load %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : !flow.dispatch.tensor<writeonly:tensor<128x128xi32>> -> tensor<128x128xi32>
%6 = scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) shared_outs(%arg2 = %5) -> (tensor<128x128xi32>) {
%extracted_slice = tensor.extract_slice %3[%arg0, 0] [64, 256] [1, 1] : tensor<128x256xi32> to tensor<64x256xi32>
%extracted_slice_5 = tensor.extract_slice %4[0, %arg1] [256, 64] [1, 1] : tensor<256x128xi32> to tensor<256x64xi32>
%extracted_slice_6 = tensor.extract_slice %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<128x128xi32> to tensor<64x64xi32>
%7 = bufferization.to_tensor %alloc_4 restrict writable : memref<2x2x32x32xi32, 1 : i32>
%8 = bufferization.to_tensor %alloc_3 restrict writable : memref<2x2x8x8x4x4xi32, 2 : i32>
%extracted_slice_7 = tensor.extract_slice %extracted_slice[0, 0] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%9 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack = tensor.pack %extracted_slice_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %9 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_8 = tensor.extract_slice %extracted_slice_5[0, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%10 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_9 = tensor.pack %extracted_slice_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %10 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%11 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %8) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_9[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_18 : tensor<1x1x8x8x4x4xi32>) -> tensor<1x1x8x8x4x4xi32>
%19 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %18) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg16 = %c0 to %c4 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_19 = tensor.extract_slice %pack_15[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_21 = tensor.extract_slice %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%25 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_19, %extracted_slice_20 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_21 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%26 = arith.muli %in, %in_22 : i32
%27 = arith.addi %out, %26 : i32
linalg.yield %27 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %25 into %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %19 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%12 = scf.for %arg3 = %c1 to %c7 step %c1 iter_args(%arg4 = %11) -> (tensor<2x2x8x8x4x4xi32>) {
%16 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg3)
%extracted_slice_14 = tensor.extract_slice %extracted_slice[0, %16] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%17 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_15 = tensor.pack %extracted_slice_14 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %17 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_16 = tensor.extract_slice %extracted_slice_5[%16, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%18 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %18 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%19 = scf.forall (%arg5, %arg6) in (2, 2) shared_outs(%arg7 = %arg4) -> (tensor<2x2x8x8x4x4xi32>) {
%extracted_slice_18 = tensor.extract_slice %pack_15[%arg5, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%20 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_19 = tensor.pack %extracted_slice_18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %20 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_20 = tensor.extract_slice %pack_17[0, %arg6, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%21 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_21 = tensor.pack %extracted_slice_20 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %21 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_22 = tensor.extract_slice %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%22 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %extracted_slice_22) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (tensor<1x1x8x8x4x4xi32>) {
%24 = scf.for %arg12 = %c0 to %c1 step %c1 iter_args(%arg13 = %arg11) -> (tensor<1x1x8x8x4x4xi32>) {
%25 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (tensor<1x1x8x8x4x4xi32>) {
%26 = scf.for %arg16 = %c0 to %c8 step %c1 iter_args(%arg17 = %arg15) -> (tensor<1x1x8x8x4x4xi32>) {
%27 = scf.for %arg18 = %c0 to %c4 step %c1 iter_args(%arg19 = %arg17) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_23 = tensor.extract_slice %pack_19[%arg8, %arg12, %arg18, %arg14, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_24 = tensor.extract_slice %pack_21[%arg12, %arg10, %arg16, %arg18, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_25 = tensor.extract_slice %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%28 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_23, %extracted_slice_24 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_25 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_26: i32, %out: i32):
%29 = arith.muli %in, %in_26 : i32
%30 = arith.addi %out, %29 : i32
linalg.yield %30 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %28 into %arg19[%arg8, %arg10, %arg16, %arg14, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %27 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %26 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %25 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %24 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.forall.in_parallel {
tensor.parallel_insert_slice %22 into %arg7[%arg5, %arg6, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %19 : tensor<2x2x8x8x4x4xi32>
}
%extracted_slice_10 = tensor.extract_slice %extracted_slice[0, 224] [64, 32] [1, 1] : tensor<64x256xi32> to tensor<64x32xi32>
%13 = bufferization.to_tensor %alloc_2 restrict writable : memref<2x1x32x32xi32, 1 : i32>
%pack_11 = tensor.pack %extracted_slice_10 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %13 : tensor<64x32xi32> -> tensor<2x1x32x32xi32>
%extracted_slice_12 = tensor.extract_slice %extracted_slice_5[224, 0] [32, 64] [1, 1] : tensor<256x64xi32> to tensor<32x64xi32>
%14 = bufferization.to_tensor %alloc_1 restrict writable : memref<1x2x32x32xi32, 1 : i32>
%pack_13 = tensor.pack %extracted_slice_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %14 : tensor<32x64xi32> -> tensor<1x2x32x32xi32>
%15:2 = scf.forall (%arg3, %arg4) in (2, 2) shared_outs(%arg5 = %12, %arg6 = %7) -> (tensor<2x2x8x8x4x4xi32>, tensor<2x2x32x32xi32>) {
%extracted_slice_14 = tensor.extract_slice %pack_11[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x1x32x32xi32> to tensor<1x1x32x32xi32>
%16 = bufferization.to_tensor %alloc_0 restrict writable : memref<1x1x4x8x4x8xi32, 2 : i32>
%pack_15 = tensor.pack %extracted_slice_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %16 : tensor<1x1x32x32xi32> -> tensor<1x1x4x8x4x8xi32>
%extracted_slice_16 = tensor.extract_slice %pack_13[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x2x32x32xi32> to tensor<1x1x32x32xi32>
%17 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x8x4xi32, 2 : i32>
%pack_17 = tensor.pack %extracted_slice_16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %17 : tensor<1x1x32x32xi32> -> tensor<1x1x8x4x8x4xi32>
%extracted_slice_18 = tensor.extract_slice %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<2x2x8x8x4x4xi32> to tensor<1x1x8x8x4x4xi32>
%18 = scf.for %arg7 = %c0 to %c1 step %c1 iter_args(%arg8 = %extracted_slice_18) -> (tensor<1x1x8x8x4x4xi32>) {
%19 = scf.for %arg9 = %c0 to %c1 step %c1 iter_args(%arg10 = %arg8) -> (tensor<1x1x8x8x4x4xi32>) {
%20 = scf.for %arg11 = %c0 to %c1 step %c1 iter_args(%arg12 = %arg10) -> (tensor<1x1x8x8x4x4xi32>) {
%21 = scf.for %arg13 = %c0 to %c8 step %c1 iter_args(%arg14 = %arg12) -> (tensor<1x1x8x8x4x4xi32>) {
%22 = scf.for %arg15 = %c0 to %c8 step %c1 iter_args(%arg16 = %arg14) -> (tensor<1x1x8x8x4x4xi32>) {
%23 = scf.for %arg17 = %c0 to %c4 step %c1 iter_args(%arg18 = %arg16) -> (tensor<1x1x8x8x4x4xi32>) {
%extracted_slice_21 = tensor.extract_slice %pack_15[%arg7, %arg11, %arg17, %arg13, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : tensor<1x1x4x8x4x8xi32> to tensor<1x1x1x1x4x8xi32>
%extracted_slice_22 = tensor.extract_slice %pack_17[%arg11, %arg9, %arg15, %arg17, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x4x8x4xi32> to tensor<1x1x1x1x8x4xi32>
%extracted_slice_23 = tensor.extract_slice %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> to tensor<1x1x1x1x4x4xi32>
%24 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%extracted_slice_21, %extracted_slice_22 : tensor<1x1x1x1x4x8xi32>, tensor<1x1x1x1x8x4xi32>) outs(%extracted_slice_23 : tensor<1x1x1x1x4x4xi32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_24: i32, %out: i32):
%25 = arith.muli %in, %in_24 : i32
%26 = arith.addi %out, %25 : i32
linalg.yield %26 : i32
} -> tensor<1x1x1x1x4x4xi32>
%inserted_slice = tensor.insert_slice %24 into %arg18[%arg7, %arg9, %arg15, %arg13, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x1x1x4x4xi32> into tensor<1x1x8x8x4x4xi32>
scf.yield %inserted_slice : tensor<1x1x8x8x4x4xi32>
}
scf.yield %23 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %22 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %21 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %20 : tensor<1x1x8x8x4x4xi32>
}
scf.yield %19 : tensor<1x1x8x8x4x4xi32>
}
%extracted_slice_19 = tensor.extract_slice %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<2x2x32x32xi32> to tensor<1x1x32x32xi32>
%unpack_20 = tensor.unpack %18 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %extracted_slice_19 : tensor<1x1x8x8x4x4xi32> -> tensor<1x1x32x32xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack_20 into %arg6[%arg3, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : tensor<1x1x32x32xi32> into tensor<2x2x32x32xi32>
tensor.parallel_insert_slice %18 into %arg5[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : tensor<1x1x8x8x4x4xi32> into tensor<2x2x8x8x4x4xi32>
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%unpack = tensor.unpack %15#1 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %extracted_slice_6 : tensor<2x2x32x32xi32> -> tensor<64x64xi32>
scf.forall.in_parallel {
tensor.parallel_insert_slice %unpack into %arg2[%arg0, %arg1] [64, 64] [1, 1] : tensor<64x64xi32> into tensor<128x128xi32>
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
flow.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [128, 128], strides = [1, 1] : tensor<128x128xi32> -> !flow.dispatch.tensor<writeonly:tensor<128x128xi32>>
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before ResolveShapedTypeResultDims (resolve-shaped-type-result-dims) //----- //
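// Note: resolve-shaped-type-result-dims replaces dim queries on op results
// with dims computed from the operands; with every shape in this dispatch
// static, it is presumably a no-op here, and this dump mainly records the
// post-bufferization state of the function.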
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_14 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
%4 = scf.for %arg4 = %c0 to %c1 step %c1 iter_args(%arg5 = %subview_14) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %arg5) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c8 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c4 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_16 = memref.subview %alloc_0[%arg4, %arg8, %arg14, %arg10, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[%arg8, %arg6, %arg12, %arg14, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%10 = arith.muli %in, %in_20 : i32
%11 = arith.addi %out, %10 : i32
linalg.yield %11 : i32
}
%subview_19 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_19 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_15 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%4 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
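// Annotation (not part of the dump): K-loop peeling is visible here. The
// first 32-wide K-slice (source offsets [0, 0]) was handled above; the
// scf.for below walks %c1 to %c7, with affine_map<(d0) -> (d0 * 32)>
// producing offsets 32..192; and the last slice at offset 224 is peeled
// off after the loop, together covering K = 256 = 8 * 32.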
%3 = scf.for %arg2 = %c1 to %c7 step %c1 iter_args(%arg3 = %alloc_3) -> (memref<2x2x8x8x4x4xi32, 2 : i32>) {
%4 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_12 = memref.subview %subview[0, %4] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_13 = memref.subview %subview_5[%4, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg4, %arg5) in (2, 2) {
%subview_14 = memref.subview %alloc_2[%arg4, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_1[0, %arg5, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_16 = memref.subview %arg3[%arg4, %arg5, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %subview_16) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%10 = scf.for %arg16 = %c0 to %c4 step %c1 iter_args(%arg17 = %arg15) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_18 = memref.subview %alloc_0[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%11 = arith.muli %in, %in_22 : i32
%12 = arith.addi %out, %11 : i32
linalg.yield %12 : i32
}
%subview_21 = memref.subview %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %10 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_17 = memref.subview %arg3[%arg4, %arg5, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %arg3 : memref<2x2x8x8x4x4xi32, 2 : i32>
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%4 = scf.for %arg4 = %c0 to %c1 step %c1 iter_args(%arg5 = %subview_14) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %arg5) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c8 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c4 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_18 = memref.subview %alloc_0[%arg4, %arg8, %arg14, %arg10, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[%arg8, %arg6, %arg12, %arg14, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%10 = arith.muli %in, %in_22 : i32
%11 = arith.addi %out, %10 : i32
linalg.yield %11 : i32
}
%subview_21 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_15 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_15 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
%subview_16 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_15 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) outs(%subview_16 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
%subview_17 = memref.subview %3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%4 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
%subview_11 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_6 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_11 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%2 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>) outs(%2 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
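// Annotation (not part of the dump): the function below is fully
// bufferized. The 128x128x256 matmul is tiled into 64x64 output blocks
// (outer scf.forall with block mapping), each block into a 2x2 thread
// grid (inner scf.forall with thread mapping), and data is staged through
// memory-space-1 buffers (2x1x32x32, 1x2x32x32, 2x2x32x32) into
// memory-space-2 buffers (1x1x4x8x4x8, 1x1x8x4x8x4, 2x2x8x8x4x4) feeding
// a 4x8 * 8x4 -> 4x4 linalg.generic micro-matmul. On AIE these spaces
// presumably correspond to memtile (L2) and core-local (L1) memory.
// Note the single-iteration scf.for loops (%c0 to %c1) whose iter_args
// merely thread a subview through unchanged; canonicalization folds
// them away. A minimal sketch of that folding, assuming a unit-trip loop
// carrying a memref:
//
//   %r = scf.for %i = %c0 to %c1 step %c1
//        iter_args(%a = %m) -> (memref<...>) {
//     ...                      // body uses %a and yields it unchanged
//     scf.yield %a : memref<...>
//   }
//
// becomes the body inlined once, with %a and %r rewritten to %m, as the
// next dump (Before CSE) confirms.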
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_14 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
%4 = scf.for %arg4 = %c0 to %c1 step %c1 iter_args(%arg5 = %subview_14) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %arg5) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c8 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c4 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_16 = memref.subview %alloc_0[%arg4, %arg8, %arg14, %arg10, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[%arg8, %arg6, %arg12, %arg14, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%10 = arith.muli %in, %in_20 : i32
%11 = arith.addi %out, %10 : i32
linalg.yield %11 : i32
}
%subview_19 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_19 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_15 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%4 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%3 = scf.for %arg2 = %c1 to %c7 step %c1 iter_args(%arg3 = %alloc_3) -> (memref<2x2x8x8x4x4xi32, 2 : i32>) {
%4 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_12 = memref.subview %subview[0, %4] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_13 = memref.subview %subview_5[%4, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg4, %arg5) in (2, 2) {
%subview_14 = memref.subview %alloc_2[%arg4, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_1[0, %arg5, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_16 = memref.subview %arg3[%arg4, %arg5, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %subview_16) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c1 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c8 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%10 = scf.for %arg16 = %c0 to %c4 step %c1 iter_args(%arg17 = %arg15) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_18 = memref.subview %alloc_0[%arg6, %arg10, %arg16, %arg12, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[%arg10, %arg8, %arg14, %arg16, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%11 = arith.muli %in, %in_22 : i32
%12 = arith.addi %out, %11 : i32
linalg.yield %12 : i32
}
%subview_21 = memref.subview %arg17[%arg6, %arg8, %arg14, %arg12, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %10 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_17 = memref.subview %arg3[%arg4, %arg5, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.yield %arg3 : memref<2x2x8x8x4x4xi32, 2 : i32>
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%4 = scf.for %arg4 = %c0 to %c1 step %c1 iter_args(%arg5 = %subview_14) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%5 = scf.for %arg6 = %c0 to %c1 step %c1 iter_args(%arg7 = %arg5) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%6 = scf.for %arg8 = %c0 to %c1 step %c1 iter_args(%arg9 = %arg7) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%7 = scf.for %arg10 = %c0 to %c8 step %c1 iter_args(%arg11 = %arg9) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%8 = scf.for %arg12 = %c0 to %c8 step %c1 iter_args(%arg13 = %arg11) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%9 = scf.for %arg14 = %c0 to %c4 step %c1 iter_args(%arg15 = %arg13) -> (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
%subview_18 = memref.subview %alloc_0[%arg4, %arg8, %arg14, %arg10, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[%arg8, %arg6, %arg12, %arg14, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%10 = arith.muli %in, %in_22 : i32
%11 = arith.addi %out, %10 : i32
linalg.yield %11 : i32
}
%subview_21 = memref.subview %arg15[%arg4, %arg6, %arg12, %arg10, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
scf.yield %arg15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %9 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %8 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %7 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %6 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
scf.yield %5 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
}
%subview_15 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_15 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
%subview_16 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_15 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) outs(%subview_16 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
%subview_17 = memref.subview %3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%4 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
%subview_11 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_6 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_11 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%2 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>) outs(%2 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before CSE (cse) //----- //
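// Annotation (not part of the dump): canonicalization has removed the
// unit-trip scf.for loops and their memref iter_args, leaving the plain
// 8x8x4 reduction nest (scf.for over %c8, %c8, %c4). What remains for
// CSE below are byte-identical duplicated ops: e.g. %subview_18 and
// %subview_19 compute the same subview of %subview_14 twice (likewise
// %subview_20/%subview_21 of %subview_16), and %subview_15 duplicates
// %subview_14 on %alloc_3; CSE should fold each such pair into one op.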
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_14 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_14[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_20: i32, %out: i32):
%3 = arith.muli %in, %in_20 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
%subview_19 = memref.subview %subview_14[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_19 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
%subview_15 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_14 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_12 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_13 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_14 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_15 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_16 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_18 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %subview_16[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%4 = arith.muli %in, %in_22 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
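                // With the unit dims dropped, this generic is a single
                // 4x8 * 8x4 -> 4x4 i32 multiply-accumulate:
                // C(d6, d7) += A(d6, d8) * B(d8, d7), where d8 (size 8) is the
                // only non-unit reduction dimension at this tiling level.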
%subview_21 = memref.subview %subview_16[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
%subview_17 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_16 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
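    // Peeled epilogue: the last k-tile (offset 224) repeats the pack +
    // micro-matmul sequence and, in addition, unpacks the finished accumulator
    // tile from the memory-space-2 buffer through %alloc_4 (memory space 1)
    // down to the 64x64 output subview.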
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_12 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_18 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_19 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_20 = memref.subview %subview_14[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_18, %subview_19 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_22: i32, %out: i32):
%3 = arith.muli %in, %in_22 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
%subview_21 = memref.subview %subview_14[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_20 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_21 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
%subview_15 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_15 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
%subview_16 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_15 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) outs(%subview_16 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
%subview_17 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_14 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
%subview_11 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_6 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_11 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
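    // %subview_6 (the unpack destination) and %subview_11 address the same
    // 64x64 region of %2, so this trailing copy is a no-op; the canonicalizer
    // run before the next dump removes it, leaving only the unpack.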
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
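// The function below still carries the identity copies introduced by
// bufferization: after buffer placement each copy-back generic reads and
// writes the same subview (e.g. ins(%subview_16) outs(%subview_16)), i.e. a
// no-op of the shape sketched here (illustrative, not from this pipeline;
// #id stands for affine_map<(d0) -> (d0)>):
//
//   linalg.generic {indexing_maps = [#id, #id], iterator_types = ["parallel"]}
//       ins(%s : memref<4xi32>) outs(%s : memref<4xi32>) {
//   ^bb0(%in: i32, %out: i32):
//     linalg.yield %in : i32
//   }
//
// The canonicalizer is expected to erase these; the next dump confirms it.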
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%subview_14 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) outs(%subview_14 : memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>, affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]} ins(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} ins(%subview_6 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) outs(%subview_6 : memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>) {
^bb0(%in: i32, %out: i32):
linalg.yield %in : i32
}
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before CleanupBufferAllocView (iree-codegen-cleanup-buffer-alloc-view) //----- //
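// Canonicalization has erased the self-aliasing copy generics: each k-tile in
// the function below now contains only the packs, the linalg.fill (first tile
// only), the packed micro-matmul, and the final unpacks to the output.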
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
// -----// IR Dump Before HoistStaticallyBoundAllocations (iree-hoist-statically-bound-allocations) //----- //
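// CleanupBufferAllocView left the visible IR unchanged. The next pass hoists
// statically bound (fixed-size) allocations toward the function entry; the
// six memref.alloc ops below already sit at the top of the function, so
// little movement is expected here.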
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
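// Note: at this point the 128x128x256 matmul has been fully tiled and
// packed: scf.forall over 64x64 workgroup tiles (mapped to gpu.block),
// 32x32 K-slices staged through L2 buffers (memory space 1 : i32), 4x8 and
// 8x4 micro-tiles staged through L1 buffers (memory space 2 : i32), and an
// innermost linalg.generic that accumulates one 4x4 output tile per
// (%arg4, %arg5, %arg6) iteration. The first and last K-iterations are
// peeled out of the scf.for over %c1 to %c7 (K tile 0 before the loop, the
// offset-224 tile after it, the latter fused with the unpack back to L2).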
// -----// IR Dump Before LowerUKernelOpsToCalls (iree-codegen-lower-ukernel-ops-to-calls) //----- //
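// Note: iree-codegen-lower-ukernel-ops-to-calls replaces
// iree_codegen.ukernel.* ops with plain func.call ops. The executable
// variant here was configured with ukernels = "none", so no ukernel ops
// are present and the IR below passes through this stage unchanged.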
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before EraseHALDescriptorTypeFromMemRef (iree-codegen-erase-hal-descriptor-type-from-memref) //----- //
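// Note: this pass strips the #hal.descriptor_type<storage_buffer> memory
// space annotation from memref types, so in the next dump the subspan
// results and their subviews appear as plain memrefs (e.g.
// memref<128x256xi32> instead of
// memref<128x256xi32, #hal.descriptor_type<storage_buffer>>).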
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %0, 64 : memref<128x256xi32, #hal.descriptor_type<storage_buffer>>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %1, 64 : memref<256x128xi32, #hal.descriptor_type<storage_buffer>>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
memref.assume_alignment %2, 64 : memref<128x128xi32, #hal.descriptor_type<storage_buffer>>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32, #hal.descriptor_type<storage_buffer>> to memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32, #hal.descriptor_type<storage_buffer>> to memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32, #hal.descriptor_type<storage_buffer>> to memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> to memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>, #hal.descriptor_type<storage_buffer>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before FoldMemRefAliasOps (fold-memref-alias-ops) //----- //
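// Note: fold-memref-alias-ops composes chains of memref.subview ops so
// that consumers index the underlying allocation or binding directly.
// A sketch using the values above:
//   %s0 = memref.subview %alloc_3[%i, %j, 0, 0, 0, 0] ...
//   %s1 = memref.subview %s0[0, 0, %k, %l, 0, 0] ...
// folds to
//   %s1 = memref.subview %alloc_3[%i, %j, %k, %l, 0, 0] ...
// which is exactly how the accumulator subviews appear in the next dump.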
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
memref.assume_alignment %0, 64 : memref<128x256xi32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
memref.assume_alignment %1, 64 : memref<256x128xi32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
memref.assume_alignment %2, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %0[%arg0, 0] [64, 256] [1, 1] : memref<128x256xi32> to memref<64x256xi32, strided<[256, 1], offset: ?>>
%subview_5 = memref.subview %1[0, %arg1] [256, 64] [1, 1] : memref<256x128xi32> to memref<256x64xi32, strided<[128, 1], offset: ?>>
%subview_6 = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32> to memref<64x64xi32, strided<[128, 1], offset: ?>>
%subview_7 = memref.subview %subview[0, 0] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %subview_5[0, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_13 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_14 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_14, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%3 = arith.muli %in, %in_17 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_11 = memref.subview %subview[0, %3] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_11 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%subview_12 = memref.subview %subview_5[%3, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_13 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_14 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_14 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_16 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_18 = memref.subview %subview_15[0, 0, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_16, %subview_17 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_18 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_19: i32, %out: i32):
%4 = arith.muli %in, %in_19 : i32
%5 = arith.addi %out, %4 : i32
linalg.yield %5 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_9 = memref.subview %subview[0, 224] [64, 32] [1, 1] : memref<64x256xi32, strided<[256, 1], offset: ?>> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%subview_10 = memref.subview %subview_5[224, 0] [32, 64] [1, 1] : memref<256x64xi32, strided<[128, 1], offset: ?>> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_15 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_17 = memref.subview %subview_13[0, 0, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_15, %subview_16 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_17 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_18: i32, %out: i32):
%3 = arith.muli %in, %in_18 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_14 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_13 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_14 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview_6 : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIEPackToDma (iree-amdaie-pack-to-dma) //----- //
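// Note (assumption): iree-amdaie-pack-to-dma is expected to rewrite the
// iree_linalg_ext.pack/unpack ops below into explicit DMA data-movement
// ops (air.dma_memcpy_nd / amdaie.dma_cpy_nd, depending on the lowering
// path), replacing the implicit copy semantics of pack/unpack; the
// rewritten IR appears further down in this (truncated) log.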
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
memref.assume_alignment %0, 64 : memref<128x256xi32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
memref.assume_alignment %1, 64 : memref<256x128xi32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
memref.assume_alignment %2, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32> to memref<64x64xi32, strided<[128, 1], offset: ?>>
%subview_5 = memref.subview %0[%arg0, 0] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_5 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%subview_6 = memref.subview %1[0, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_6 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_9 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_10 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_11 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
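        // Note: the 8x8x4 loop nest below walks the packed blocks; each
        // innermost step is one 4x8 * 8x4 -> 4x4 i32 multiply-accumulate,
        // written as a 9-D linalg.generic whose reduction dims are d2, d5,
        // and d8 -- presumably the shape the later vectorization targets.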
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_12 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_14 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_12, %subview_13 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_14 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_15: i32, %out: i32):
%3 = arith.muli %in, %in_15 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
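      // Note: the K loop is peeled: iteration 0 is the prologue above (K
      // offset 0, where the accumulator is zero-filled), iterations 1..6 run
      // in the scf.for below via the (d0 * 32) affine map, and iteration 7
      // (K offset 224 = 7 * 32) is the epilogue that also unpacks the result.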
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_9 = memref.subview %0[%arg0, %3] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_9 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%4 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_10 = memref.subview %1[%4, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_11 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_12 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_12 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_13 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_14 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_13, %subview_14 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%5 = arith.muli %in, %in_16 : i32
%6 = arith.addi %out, %5 : i32
linalg.yield %6 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_7 = memref.subview %0[%arg0, 224] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
iree_linalg_ext.pack %subview_7 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_2 : (memref<64x32xi32, strided<[256, 1], offset: ?>> memref<2x1x32x32xi32, 1 : i32>)
%subview_8 = memref.subview %1[224, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
iree_linalg_ext.pack %subview_8 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %alloc_1 : (memref<32x64xi32, strided<[128, 1], offset: ?>> memref<1x2x32x32xi32, 1 : i32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_9 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_9 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_0 : (memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x4x8x4x8xi32, 2 : i32>)
%subview_10 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.pack %subview_10 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 4] into %alloc : (memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32> memref<1x1x8x4x8x4xi32, 2 : i32>)
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_13 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_14 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_13, %subview_14 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_15 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_16: i32, %out: i32):
%3 = arith.muli %in, %in_16 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_12 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
iree_linalg_ext.unpack %subview_11 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 4] into %subview_12 : (memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32> memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
iree_linalg_ext.unpack %alloc_4 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %subview : (memref<2x2x32x32xi32, 1 : i32> memref<64x64xi32, strided<[128, 1], offset: ?>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before CopyToDma (air-copy-to-dma) //----- //
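// Note: AMDAIEPackToDma has replaced every iree_linalg_ext.pack/unpack above
// with an air.dma_memcpy_nd. The offset/size/stride operands of each copy are
// materialized as fresh arith.constant ops at the rewrite site, hence the
// long runs of duplicate constants below; canonicalization folds them away
// before the next dump.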
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
memref.assume_alignment %0, 64 : memref<128x256xi32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
memref.assume_alignment %1, 64 : memref<256x128xi32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
memref.assume_alignment %2, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%subview = memref.subview %2[%arg0, %arg1] [64, 64] [1, 1] : memref<128x128xi32> to memref<64x64xi32, strided<[128, 1], offset: ?>>
%subview_5 = memref.subview %0[%arg0, 0] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
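      // Note: the constants %c0_6 .. %c1_23 below are the offset/size/stride
      // operands of the first air.dma_memcpy_nd, which implements the
      // iree_linalg_ext.pack of %subview_5 from the previous dump.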
%c0_6 = arith.constant 0 : index
%c0_7 = arith.constant 0 : index
%c0_8 = arith.constant 0 : index
%c0_9 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c1_10 = arith.constant 1 : index
%c32 = arith.constant 32 : index
%c32_11 = arith.constant 32 : index
%c1024 = arith.constant 1024 : index
%c1024_12 = arith.constant 1024 : index
%c32_13 = arith.constant 32 : index
%c1_14 = arith.constant 1 : index
%c0_15 = arith.constant 0 : index
%c0_16 = arith.constant 0 : index
%c0_17 = arith.constant 0 : index
%c2_18 = arith.constant 2 : index
%c1_19 = arith.constant 1 : index
%c32_20 = arith.constant 32 : index
%c32_21 = arith.constant 32 : index
%c8192 = arith.constant 8192 : index
%c32_22 = arith.constant 32 : index
%c256 = arith.constant 256 : index
%c1_23 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_2[%c0_6, %c0_7, %c0_8, %c0_9] [%c2, %c1_10, %c32, %c32_11] [%c1024, %c1024_12, %c32_13, %c1_14], %0[%c0_15, %c0_16, %arg0, %c0_17] [%c2_18, %c1_19, %c32_20, %c32_21] [%c8192, %c32_22, %c256, %c1_23]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
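      // Note: air.dma_memcpy_nd takes (dst[offsets][sizes][strides],
      // src[offsets][sizes][strides]); here 2x1x32x32 elements are gathered
      // from %0 with element strides [8192, 32, 256, 1] and written
      // contiguously into %alloc_2.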
%subview_24 = memref.subview %1[0, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
%c0_25 = arith.constant 0 : index
%c0_26 = arith.constant 0 : index
%c0_27 = arith.constant 0 : index
%c0_28 = arith.constant 0 : index
%c1_29 = arith.constant 1 : index
%c2_30 = arith.constant 2 : index
%c32_31 = arith.constant 32 : index
%c32_32 = arith.constant 32 : index
%c2048 = arith.constant 2048 : index
%c1024_33 = arith.constant 1024 : index
%c32_34 = arith.constant 32 : index
%c1_35 = arith.constant 1 : index
%c0_36 = arith.constant 0 : index
%c0_37 = arith.constant 0 : index
%c0_38 = arith.constant 0 : index
%c1_39 = arith.constant 1 : index
%c2_40 = arith.constant 2 : index
%c32_41 = arith.constant 32 : index
%c32_42 = arith.constant 32 : index
%c4096 = arith.constant 4096 : index
%c32_43 = arith.constant 32 : index
%c128 = arith.constant 128 : index
%c1_44 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_1[%c0_25, %c0_26, %c0_27, %c0_28] [%c1_29, %c2_30, %c32_31, %c32_32] [%c2048, %c1024_33, %c32_34, %c1_35], %1[%c0_36, %c0_37, %c0_38, %arg1] [%c1_39, %c2_40, %c32_41, %c32_42] [%c4096, %c32_43, %c128, %c1_44]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_107 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_108 = arith.constant 0 : index
%c0_109 = arith.constant 0 : index
%c0_110 = arith.constant 0 : index
%c0_111 = arith.constant 0 : index
%c0_112 = arith.constant 0 : index
%c0_113 = arith.constant 0 : index
%c1_114 = arith.constant 1 : index
%c1_115 = arith.constant 1 : index
%c4_116 = arith.constant 4 : index
%c8_117 = arith.constant 8 : index
%c4_118 = arith.constant 4 : index
%c8_119 = arith.constant 8 : index
%c1024_120 = arith.constant 1024 : index
%c1024_121 = arith.constant 1024 : index
%c256_122 = arith.constant 256 : index
%c32_123 = arith.constant 32 : index
%c8_124 = arith.constant 8 : index
%c1_125 = arith.constant 1 : index
%c0_126 = arith.constant 0 : index
%c0_127 = arith.constant 0 : index
%c0_128 = arith.constant 0 : index
%c0_129 = arith.constant 0 : index
%c0_130 = arith.constant 0 : index
%c1_131 = arith.constant 1 : index
%c1_132 = arith.constant 1 : index
%c4_133 = arith.constant 4 : index
%c8_134 = arith.constant 8 : index
%c4_135 = arith.constant 4 : index
%c8_136 = arith.constant 8 : index
%c1024_137 = arith.constant 1024 : index
%c1024_138 = arith.constant 1024 : index
%c8_139 = arith.constant 8 : index
%c128_140 = arith.constant 128 : index
%c32_141 = arith.constant 32 : index
%c1_142 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_0[%c0_108, %c0_109, %c0_110, %c0_111, %c0_112, %c0_113] [%c1_114, %c1_115, %c4_116, %c8_117, %c4_118, %c8_119] [%c1024_120, %c1024_121, %c256_122, %c32_123, %c8_124, %c1_125], %alloc_2[%arg2, %c0_126, %c0_127, %c0_128, %c0_129, %c0_130] [%c1_131, %c1_132, %c4_133, %c8_134, %c4_135, %c8_136] [%c1024_137, %c1024_138, %c8_139, %c128_140, %c32_141, %c1_142]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
%subview_143 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_144 = arith.constant 0 : index
%c0_145 = arith.constant 0 : index
%c0_146 = arith.constant 0 : index
%c0_147 = arith.constant 0 : index
%c0_148 = arith.constant 0 : index
%c0_149 = arith.constant 0 : index
%c1_150 = arith.constant 1 : index
%c1_151 = arith.constant 1 : index
%c8_152 = arith.constant 8 : index
%c4_153 = arith.constant 4 : index
%c8_154 = arith.constant 8 : index
%c4_155 = arith.constant 4 : index
%c1024_156 = arith.constant 1024 : index
%c1024_157 = arith.constant 1024 : index
%c128_158 = arith.constant 128 : index
%c32_159 = arith.constant 32 : index
%c4_160 = arith.constant 4 : index
%c1_161 = arith.constant 1 : index
%c0_162 = arith.constant 0 : index
%c0_163 = arith.constant 0 : index
%c0_164 = arith.constant 0 : index
%c0_165 = arith.constant 0 : index
%c0_166 = arith.constant 0 : index
%c1_167 = arith.constant 1 : index
%c1_168 = arith.constant 1 : index
%c8_169 = arith.constant 8 : index
%c4_170 = arith.constant 4 : index
%c8_171 = arith.constant 8 : index
%c4_172 = arith.constant 4 : index
%c2048_173 = arith.constant 2048 : index
%c1024_174 = arith.constant 1024 : index
%c4_175 = arith.constant 4 : index
%c256_176 = arith.constant 256 : index
%c32_177 = arith.constant 32 : index
%c1_178 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc[%c0_144, %c0_145, %c0_146, %c0_147, %c0_148, %c0_149] [%c1_150, %c1_151, %c8_152, %c4_153, %c8_154, %c4_155] [%c1024_156, %c1024_157, %c128_158, %c32_159, %c4_160, %c1_161], %alloc_1[%c0_162, %arg3, %c0_163, %c0_164, %c0_165, %c0_166] [%c1_167, %c1_168, %c8_169, %c4_170, %c8_171, %c4_172] [%c2048_173, %c1024_174, %c4_175, %c256_176, %c32_177, %c1_178]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
%subview_179 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview_179 : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_180 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_181 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_182 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_180, %subview_181 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_182 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_183: i32, %out: i32):
%3 = arith.muli %in, %in_183 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_107 = memref.subview %0[%arg0, %3] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
%c0_108 = arith.constant 0 : index
%c0_109 = arith.constant 0 : index
%c0_110 = arith.constant 0 : index
%c0_111 = arith.constant 0 : index
%c2_112 = arith.constant 2 : index
%c1_113 = arith.constant 1 : index
%c32_114 = arith.constant 32 : index
%c32_115 = arith.constant 32 : index
%c1024_116 = arith.constant 1024 : index
%c1024_117 = arith.constant 1024 : index
%c32_118 = arith.constant 32 : index
%c1_119 = arith.constant 1 : index
%c0_120 = arith.constant 0 : index
%c0_121 = arith.constant 0 : index
%c2_122 = arith.constant 2 : index
%c1_123 = arith.constant 1 : index
%c32_124 = arith.constant 32 : index
%c32_125 = arith.constant 32 : index
%c8192_126 = arith.constant 8192 : index
%c32_127 = arith.constant 32 : index
%c256_128 = arith.constant 256 : index
%c1_129 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_2[%c0_108, %c0_109, %c0_110, %c0_111] [%c2_112, %c1_113, %c32_114, %c32_115] [%c1024_116, %c1024_117, %c32_118, %c1_119], %0[%c0_120, %c0_121, %arg0, %3] [%c2_122, %c1_123, %c32_124, %c32_125] [%c8192_126, %c32_127, %c256_128, %c1_129]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
%4 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%subview_130 = memref.subview %1[%4, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
%c0_131 = arith.constant 0 : index
%c0_132 = arith.constant 0 : index
%c0_133 = arith.constant 0 : index
%c0_134 = arith.constant 0 : index
%c1_135 = arith.constant 1 : index
%c2_136 = arith.constant 2 : index
%c32_137 = arith.constant 32 : index
%c32_138 = arith.constant 32 : index
%c2048_139 = arith.constant 2048 : index
%c1024_140 = arith.constant 1024 : index
%c32_141 = arith.constant 32 : index
%c1_142 = arith.constant 1 : index
%c0_143 = arith.constant 0 : index
%c0_144 = arith.constant 0 : index
%c1_145 = arith.constant 1 : index
%c2_146 = arith.constant 2 : index
%c32_147 = arith.constant 32 : index
%c32_148 = arith.constant 32 : index
%c4096_149 = arith.constant 4096 : index
%c32_150 = arith.constant 32 : index
%c128_151 = arith.constant 128 : index
%c1_152 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_1[%c0_131, %c0_132, %c0_133, %c0_134] [%c1_135, %c2_136, %c32_137, %c32_138] [%c2048_139, %c1024_140, %c32_141, %c1_142], %1[%c0_143, %c0_144, %4, %arg1] [%c1_145, %c2_146, %c32_147, %c32_148] [%c4096_149, %c32_150, %c128_151, %c1_152]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg3, %arg4) in (2, 2) {
%subview_153 = memref.subview %alloc_2[%arg3, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_154 = arith.constant 0 : index
%c0_155 = arith.constant 0 : index
%c0_156 = arith.constant 0 : index
%c0_157 = arith.constant 0 : index
%c0_158 = arith.constant 0 : index
%c0_159 = arith.constant 0 : index
%c1_160 = arith.constant 1 : index
%c1_161 = arith.constant 1 : index
%c4_162 = arith.constant 4 : index
%c8_163 = arith.constant 8 : index
%c4_164 = arith.constant 4 : index
%c8_165 = arith.constant 8 : index
%c1024_166 = arith.constant 1024 : index
%c1024_167 = arith.constant 1024 : index
%c256_168 = arith.constant 256 : index
%c32_169 = arith.constant 32 : index
%c8_170 = arith.constant 8 : index
%c1_171 = arith.constant 1 : index
%c0_172 = arith.constant 0 : index
%c0_173 = arith.constant 0 : index
%c0_174 = arith.constant 0 : index
%c0_175 = arith.constant 0 : index
%c0_176 = arith.constant 0 : index
%c1_177 = arith.constant 1 : index
%c1_178 = arith.constant 1 : index
%c4_179 = arith.constant 4 : index
%c8_180 = arith.constant 8 : index
%c4_181 = arith.constant 4 : index
%c8_182 = arith.constant 8 : index
%c1024_183 = arith.constant 1024 : index
%c1024_184 = arith.constant 1024 : index
%c8_185 = arith.constant 8 : index
%c128_186 = arith.constant 128 : index
%c32_187 = arith.constant 32 : index
%c1_188 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_0[%c0_154, %c0_155, %c0_156, %c0_157, %c0_158, %c0_159] [%c1_160, %c1_161, %c4_162, %c8_163, %c4_164, %c8_165] [%c1024_166, %c1024_167, %c256_168, %c32_169, %c8_170, %c1_171], %alloc_2[%arg3, %c0_172, %c0_173, %c0_174, %c0_175, %c0_176] [%c1_177, %c1_178, %c4_179, %c8_180, %c4_181, %c8_182] [%c1024_183, %c1024_184, %c8_185, %c128_186, %c32_187, %c1_188]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
%subview_189 = memref.subview %alloc_1[0, %arg4, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_190 = arith.constant 0 : index
%c0_191 = arith.constant 0 : index
%c0_192 = arith.constant 0 : index
%c0_193 = arith.constant 0 : index
%c0_194 = arith.constant 0 : index
%c0_195 = arith.constant 0 : index
%c1_196 = arith.constant 1 : index
%c1_197 = arith.constant 1 : index
%c8_198 = arith.constant 8 : index
%c4_199 = arith.constant 4 : index
%c8_200 = arith.constant 8 : index
%c4_201 = arith.constant 4 : index
%c1024_202 = arith.constant 1024 : index
%c1024_203 = arith.constant 1024 : index
%c128_204 = arith.constant 128 : index
%c32_205 = arith.constant 32 : index
%c4_206 = arith.constant 4 : index
%c1_207 = arith.constant 1 : index
%c0_208 = arith.constant 0 : index
%c0_209 = arith.constant 0 : index
%c0_210 = arith.constant 0 : index
%c0_211 = arith.constant 0 : index
%c0_212 = arith.constant 0 : index
%c1_213 = arith.constant 1 : index
%c1_214 = arith.constant 1 : index
%c8_215 = arith.constant 8 : index
%c4_216 = arith.constant 4 : index
%c8_217 = arith.constant 8 : index
%c4_218 = arith.constant 4 : index
%c2048_219 = arith.constant 2048 : index
%c1024_220 = arith.constant 1024 : index
%c4_221 = arith.constant 4 : index
%c256_222 = arith.constant 256 : index
%c32_223 = arith.constant 32 : index
%c1_224 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc[%c0_190, %c0_191, %c0_192, %c0_193, %c0_194, %c0_195] [%c1_196, %c1_197, %c8_198, %c4_199, %c8_200, %c4_201] [%c1024_202, %c1024_203, %c128_204, %c32_205, %c4_206, %c1_207], %alloc_1[%c0_208, %arg4, %c0_209, %c0_210, %c0_211, %c0_212] [%c1_213, %c1_214, %c8_215, %c4_216, %c8_217, %c4_218] [%c2048_219, %c1024_220, %c4_221, %c256_222, %c32_223, %c1_224]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview_225 = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_226 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_227 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_225, %subview_226 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_227 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_228: i32, %out: i32):
%5 = arith.muli %in, %in_228 : i32
%6 = arith.addi %out, %5 : i32
linalg.yield %6 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%subview_45 = memref.subview %0[%arg0, 224] [64, 32] [1, 1] : memref<128x256xi32> to memref<64x32xi32, strided<[256, 1], offset: ?>>
%c0_46 = arith.constant 0 : index
%c0_47 = arith.constant 0 : index
%c0_48 = arith.constant 0 : index
%c0_49 = arith.constant 0 : index
%c2_50 = arith.constant 2 : index
%c1_51 = arith.constant 1 : index
%c32_52 = arith.constant 32 : index
%c32_53 = arith.constant 32 : index
%c1024_54 = arith.constant 1024 : index
%c1024_55 = arith.constant 1024 : index
%c32_56 = arith.constant 32 : index
%c1_57 = arith.constant 1 : index
%c0_58 = arith.constant 0 : index
%c0_59 = arith.constant 0 : index
%c224 = arith.constant 224 : index
%c2_60 = arith.constant 2 : index
%c1_61 = arith.constant 1 : index
%c32_62 = arith.constant 32 : index
%c32_63 = arith.constant 32 : index
%c8192_64 = arith.constant 8192 : index
%c32_65 = arith.constant 32 : index
%c256_66 = arith.constant 256 : index
%c1_67 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_2[%c0_46, %c0_47, %c0_48, %c0_49] [%c2_50, %c1_51, %c32_52, %c32_53] [%c1024_54, %c1024_55, %c32_56, %c1_57], %0[%c0_58, %c0_59, %arg0, %c224] [%c2_60, %c1_61, %c32_62, %c32_63] [%c8192_64, %c32_65, %c256_66, %c1_67]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
%subview_68 = memref.subview %1[224, %arg1] [32, 64] [1, 1] : memref<256x128xi32> to memref<32x64xi32, strided<[128, 1], offset: ?>>
%c0_69 = arith.constant 0 : index
%c0_70 = arith.constant 0 : index
%c0_71 = arith.constant 0 : index
%c0_72 = arith.constant 0 : index
%c1_73 = arith.constant 1 : index
%c2_74 = arith.constant 2 : index
%c32_75 = arith.constant 32 : index
%c32_76 = arith.constant 32 : index
%c2048_77 = arith.constant 2048 : index
%c1024_78 = arith.constant 1024 : index
%c32_79 = arith.constant 32 : index
%c1_80 = arith.constant 1 : index
%c0_81 = arith.constant 0 : index
%c0_82 = arith.constant 0 : index
%c224_83 = arith.constant 224 : index
%c1_84 = arith.constant 1 : index
%c2_85 = arith.constant 2 : index
%c32_86 = arith.constant 32 : index
%c32_87 = arith.constant 32 : index
%c4096_88 = arith.constant 4096 : index
%c32_89 = arith.constant 32 : index
%c128_90 = arith.constant 128 : index
%c1_91 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_1[%c0_69, %c0_70, %c0_71, %c0_72] [%c1_73, %c2_74, %c32_75, %c32_76] [%c2048_77, %c1024_78, %c32_79, %c1_80], %1[%c0_81, %c0_82, %c224_83, %arg1] [%c1_84, %c2_85, %c32_86, %c32_87] [%c4096_88, %c32_89, %c128_90, %c1_91]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg2, %arg3) in (2, 2) {
%subview_107 = memref.subview %alloc_2[%arg2, 0, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x1x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[1024, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_108 = arith.constant 0 : index
%c0_109 = arith.constant 0 : index
%c0_110 = arith.constant 0 : index
%c0_111 = arith.constant 0 : index
%c0_112 = arith.constant 0 : index
%c0_113 = arith.constant 0 : index
%c1_114 = arith.constant 1 : index
%c1_115 = arith.constant 1 : index
%c4_116 = arith.constant 4 : index
%c8_117 = arith.constant 8 : index
%c4_118 = arith.constant 4 : index
%c8_119 = arith.constant 8 : index
%c1024_120 = arith.constant 1024 : index
%c1024_121 = arith.constant 1024 : index
%c256_122 = arith.constant 256 : index
%c32_123 = arith.constant 32 : index
%c8_124 = arith.constant 8 : index
%c1_125 = arith.constant 1 : index
%c0_126 = arith.constant 0 : index
%c0_127 = arith.constant 0 : index
%c0_128 = arith.constant 0 : index
%c0_129 = arith.constant 0 : index
%c0_130 = arith.constant 0 : index
%c1_131 = arith.constant 1 : index
%c1_132 = arith.constant 1 : index
%c4_133 = arith.constant 4 : index
%c8_134 = arith.constant 8 : index
%c4_135 = arith.constant 4 : index
%c8_136 = arith.constant 8 : index
%c1024_137 = arith.constant 1024 : index
%c1024_138 = arith.constant 1024 : index
%c8_139 = arith.constant 8 : index
%c128_140 = arith.constant 128 : index
%c32_141 = arith.constant 32 : index
%c1_142 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_0[%c0_108, %c0_109, %c0_110, %c0_111, %c0_112, %c0_113] [%c1_114, %c1_115, %c4_116, %c8_117, %c4_118, %c8_119] [%c1024_120, %c1024_121, %c256_122, %c32_123, %c8_124, %c1_125], %alloc_2[%arg2, %c0_126, %c0_127, %c0_128, %c0_129, %c0_130] [%c1_131, %c1_132, %c4_133, %c8_134, %c4_135, %c8_136] [%c1024_137, %c1024_138, %c8_139, %c128_140, %c32_141, %c1_142]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
%subview_143 = memref.subview %alloc_1[0, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<1x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_144 = arith.constant 0 : index
%c0_145 = arith.constant 0 : index
%c0_146 = arith.constant 0 : index
%c0_147 = arith.constant 0 : index
%c0_148 = arith.constant 0 : index
%c0_149 = arith.constant 0 : index
%c1_150 = arith.constant 1 : index
%c1_151 = arith.constant 1 : index
%c8_152 = arith.constant 8 : index
%c4_153 = arith.constant 4 : index
%c8_154 = arith.constant 8 : index
%c4_155 = arith.constant 4 : index
%c1024_156 = arith.constant 1024 : index
%c1024_157 = arith.constant 1024 : index
%c128_158 = arith.constant 128 : index
%c32_159 = arith.constant 32 : index
%c4_160 = arith.constant 4 : index
%c1_161 = arith.constant 1 : index
%c0_162 = arith.constant 0 : index
%c0_163 = arith.constant 0 : index
%c0_164 = arith.constant 0 : index
%c0_165 = arith.constant 0 : index
%c0_166 = arith.constant 0 : index
%c1_167 = arith.constant 1 : index
%c1_168 = arith.constant 1 : index
%c8_169 = arith.constant 8 : index
%c4_170 = arith.constant 4 : index
%c8_171 = arith.constant 8 : index
%c4_172 = arith.constant 4 : index
%c2048_173 = arith.constant 2048 : index
%c1024_174 = arith.constant 1024 : index
%c4_175 = arith.constant 4 : index
%c256_176 = arith.constant 256 : index
%c32_177 = arith.constant 32 : index
%c1_178 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc[%c0_144, %c0_145, %c0_146, %c0_147, %c0_148, %c0_149] [%c1_150, %c1_151, %c8_152, %c4_153, %c8_154, %c4_155] [%c1024_156, %c1024_157, %c128_158, %c32_159, %c4_160, %c1_161], %alloc_1[%c0_162, %arg3, %c0_163, %c0_164, %c0_165, %c0_166] [%c1_167, %c1_168, %c8_169, %c4_170, %c8_171, %c4_172] [%c2048_173, %c1024_174, %c4_175, %c256_176, %c32_177, %c1_178]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
%subview_179 = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_206 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_207 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_208 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_206, %subview_207 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_208 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_209: i32, %out: i32):
%3 = arith.muli %in, %in_209 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
%subview_180 = memref.subview %alloc_4[%arg2, %arg3, 0, 0] [1, 1, 32, 32] [1, 1, 1, 1] : memref<2x2x32x32xi32, 1 : i32> to memref<1x1x32x32xi32, strided<[2048, 1024, 32, 1], offset: ?>, 1 : i32>
%c0_181 = arith.constant 0 : index
%c0_182 = arith.constant 0 : index
%c1_183 = arith.constant 1 : index
%c1_184 = arith.constant 1 : index
%c32_185 = arith.constant 32 : index
%c32_186 = arith.constant 32 : index
%c2048_187 = arith.constant 2048 : index
%c1024_188 = arith.constant 1024 : index
%c32_189 = arith.constant 32 : index
%c1_190 = arith.constant 1 : index
%c0_191 = arith.constant 0 : index
%c0_192 = arith.constant 0 : index
%c0_193 = arith.constant 0 : index
%c0_194 = arith.constant 0 : index
%c1_195 = arith.constant 1 : index
%c1_196 = arith.constant 1 : index
%c8_197 = arith.constant 8 : index
%c4_198 = arith.constant 4 : index
%c8_199 = arith.constant 8 : index
%c4_200 = arith.constant 4 : index
%c2048_201 = arith.constant 2048 : index
%c1024_202 = arith.constant 1024 : index
%c16 = arith.constant 16 : index
%c4_203 = arith.constant 4 : index
%c128_204 = arith.constant 128 : index
%c1_205 = arith.constant 1 : index
air.dma_memcpy_nd (%alloc_4[%arg2, %arg3, %c0_181, %c0_182] [%c1_183, %c1_184, %c32_185, %c32_186] [%c2048_187, %c1024_188, %c32_189, %c1_190], %alloc_3[%arg2, %arg3, %c0_191, %c0_192, %c0_193, %c0_194] [%c1_195, %c1_196, %c8_197, %c4_198, %c8_199, %c4_200] [%c2048_201, %c1024_202, %c16, %c4_203, %c128_204, %c1_205]) : (memref<2x2x32x32xi32, 1 : i32>, memref<2x2x8x8x4x4xi32, 2 : i32>)
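        // Note: this copy corresponds to the iree_linalg_ext.unpack of
        // %subview_11 into a subview of %alloc_4 in the previous dump: one
        // 8x8x4x4 accumulator block is rearranged back into a 32x32 tile of
        // the space-1 buffer.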
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%c64 = arith.constant 64 : index
%c64_92 = arith.constant 64 : index
%c128_93 = arith.constant 128 : index
%c1_94 = arith.constant 1 : index
%c0_95 = arith.constant 0 : index
%c0_96 = arith.constant 0 : index
%c0_97 = arith.constant 0 : index
%c0_98 = arith.constant 0 : index
%c2_99 = arith.constant 2 : index
%c32_100 = arith.constant 32 : index
%c2_101 = arith.constant 2 : index
%c32_102 = arith.constant 32 : index
%c2048_103 = arith.constant 2048 : index
%c32_104 = arith.constant 32 : index
%c1024_105 = arith.constant 1024 : index
%c1_106 = arith.constant 1 : index
air.dma_memcpy_nd (%2[%arg0, %arg1] [%c64, %c64_92] [%c128_93, %c1_94], %alloc_4[%c0_95, %c0_96, %c0_97, %c0_98] [%c2_99, %c32_100, %c2_101, %c32_102] [%c2048_103, %c32_104, %c1024_105, %c1_106]) : (memref<128x128xi32>, memref<2x2x32x32xi32, 1 : i32>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIEAIRDmaToAMDAIEDma (iree-amdaie-air-dma-to-amdaie-dma) //----- //
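// Note: this dump appears to differ from the previous one mainly by
// canonicalization/CSE: the duplicated DMA operand constants are folded into
// a single set hoisted to the top of the function, and subviews that only fed
// pack/unpack ops are gone. AMDAIEAIRDmaToAMDAIEDma next wraps the DMA
// endpoints in amdaie.logicalobjectfifo handles, visible in the dump that
// follows.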
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
memref.assume_alignment %0, 64 : memref<128x256xi32>
%1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
memref.assume_alignment %1, 64 : memref<256x128xi32>
%2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
memref.assume_alignment %2, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
air.dma_memcpy_nd (%alloc_2[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %0[%c0, %c0, %arg0, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
air.dma_memcpy_nd (%alloc_1[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %1[%c0, %c0, %c0, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg2, %arg3) in (2, 2) {
air.dma_memcpy_nd (%alloc_0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %alloc_2[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
air.dma_memcpy_nd (%alloc[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %alloc_1[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_5 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_5, %subview_6 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_7 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_8: i32, %out: i32):
%3 = arith.muli %in, %in_8 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
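      // Note: same peeled K structure as in the previous dumps; the DMAs now
      // take the hoisted constants directly, with the steady-state loop
      // covering K offsets 32..192 and the trailing copies covering %c224.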
scf.for %arg2 = %c1 to %c7 step %c1 {
%3 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
air.dma_memcpy_nd (%alloc_2[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %0[%c0, %c0, %arg0, %3] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
%4 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
air.dma_memcpy_nd (%alloc_1[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %1[%c0, %c0, %4, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg3, %arg4) in (2, 2) {
air.dma_memcpy_nd (%alloc_0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %alloc_2[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
air.dma_memcpy_nd (%alloc[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %alloc_1[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%5 = arith.muli %in, %in_7 : i32
%6 = arith.addi %out, %5 : i32
linalg.yield %6 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
air.dma_memcpy_nd (%alloc_2[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %0[%c0, %c0, %arg0, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (memref<2x1x32x32xi32, 1 : i32>, memref<128x256xi32>)
air.dma_memcpy_nd (%alloc_1[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %1[%c0, %c0, %c224, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (memref<1x2x32x32xi32, 1 : i32>, memref<256x128xi32>)
scf.forall (%arg2, %arg3) in (2, 2) {
air.dma_memcpy_nd (%alloc_0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %alloc_2[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (memref<1x1x4x8x4x8xi32, 2 : i32>, memref<2x1x32x32xi32, 1 : i32>)
air.dma_memcpy_nd (%alloc[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %alloc_1[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (memref<1x1x8x4x8x4xi32, 2 : i32>, memref<1x2x32x32xi32, 1 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%3 = arith.muli %in, %in_7 : i32
%4 = arith.addi %out, %3 : i32
linalg.yield %4 : i32
}
}
}
}
air.dma_memcpy_nd (%alloc_4[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %alloc_3[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (memref<2x2x32x32xi32, 1 : i32>, memref<2x2x8x8x4x4xi32, 2 : i32>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
air.dma_memcpy_nd (%2[%arg0, %arg1] [%c64, %c64] [%c128, %c1], %alloc_4[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (memref<128x128xi32>, memref<2x2x32x32xi32, 1 : i32>)
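      // Note: the final copy writes the 2x2 grid of 32x32 tiles in %alloc_4
      // back to the 64x64 output tile of %2: source sizes [2, 32, 2, 32] with
      // strides [2048, 32, 1024, 1] interleave tile rows and columns so the
      // destination fills in plain row-major order.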
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIENormalizeLoopBounds (iree-amdaie-normalize-loop-bounds) //----- //
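// Note: AMDAIEAIRDmaToAMDAIEDma has introduced one
// amdaie.logicalobjectfifo.from_memref handle per DMA use of each buffer
// (e.g. six for %alloc_2, which is written three times and read three times).
// The pass about to run presumably rewrites loops such as
// scf.for %arg2 = %c1 to %c7 into zero-based, unit-step form, as its name
// suggests.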
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%0 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%1 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%2 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%3 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%4 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%5 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%6 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%7 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%8 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%9 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%10 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%11 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%12 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%13 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%14 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%16 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%17 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%18 = amdaie.logicalobjectfifo.from_memref %alloc_3, {} : memref<2x2x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%19 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%20 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%21 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%22 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%23 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%24 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %21, 64 : memref<128x256xi32>
%25 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%26 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%27 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%28 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %25, 64 : memref<256x128xi32>
%29 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%30 = amdaie.logicalobjectfifo.from_memref %29, {} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %29, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) = (0, 0) to (128, 128) step (64, 64) {
%31 = amdaie.dma_cpy_nd(%12[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %22[%c0, %c0, %arg0, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%32 = amdaie.dma_cpy_nd(%6[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %26[%c0, %c0, %c0, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%36 = amdaie.dma_cpy_nd(%3[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %13[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%37 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %7[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_5 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_5, %subview_6 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_7 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_8: i32, %out: i32):
%38 = arith.muli %in, %in_8 : i32
%39 = arith.addi %out, %38 : i32
linalg.yield %39 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
scf.for %arg2 = %c1 to %c7 step %c1 {
%36 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%37 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %23[%c0, %c0, %arg0, %36] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%38 = affine.apply affine_map<(d0) -> (d0 * 32)>(%arg2)
%39 = amdaie.dma_cpy_nd(%8[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %27[%c0, %c0, %38, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg3, %arg4) in (2, 2) {
%40 = amdaie.dma_cpy_nd(%4[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %15[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%41 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %9[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%42 = arith.muli %in, %in_7 : i32
%43 = arith.addi %out, %42 : i32
linalg.yield %43 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%33 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %24[%c0, %c0, %arg0, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%34 = amdaie.dma_cpy_nd(%10[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %28[%c0, %c0, %c224, %arg1] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%36 = amdaie.dma_cpy_nd(%5[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %17[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%37 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %11[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%39 = arith.muli %in, %in_7 : i32
%40 = arith.addi %out, %39 : i32
linalg.yield %40 : i32
}
}
}
}
%38 = amdaie.dma_cpy_nd(%19[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %18[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%35 = amdaie.dma_cpy_nd(%30[%arg0, %arg1] [%c64, %c64] [%c128, %c1], %20[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIEInsertCores (iree-amdaie-insert-cores) //----- //
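// Annotation: the module below shows the normalized loop bounds and is the input to
// AMDAIEInsertCores. Judging from the next dump, that pass wraps each per-tile
// compute region in an amdaie.core op attached to an amdaie.tile, and records the
// local-memory transfers the core synchronizes on via
// amdaie.logicalobjectfifo.consume / amdaie.logicalobjectfifo.produce.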
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%0 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%1 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%2 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%3 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%4 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%5 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%6 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%7 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%8 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%9 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%10 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%11 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%12 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%13 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%14 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%16 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%17 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%18 = amdaie.logicalobjectfifo.from_memref %alloc_3, {} : memref<2x2x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%19 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%20 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%21 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%22 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%23 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%24 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %21, 64 : memref<128x256xi32>
%25 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%26 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%27 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%28 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %25, 64 : memref<256x128xi32>
%29 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%30 = amdaie.logicalobjectfifo.from_memref %29, {} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %29, 64 : memref<128x128xi32>
%c2_5 = arith.constant 2 : index
%c1_6 = arith.constant 1 : index
%c2_7 = arith.constant 2 : index
%c1_8 = arith.constant 1 : index
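    // Annotation: %c2_5, %c1_6, %c2_7 and %c1_8 duplicate the existing %c2 and %c1;
    // they appear to be leftovers from rewriting the scf.forall bounds and are
    // presumably folded away by the CSE run further down.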
scf.forall (%arg0, %arg1) in (2, 2) {
%31 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%32 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%33 = amdaie.dma_cpy_nd(%12[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %22[%c0, %c0, %32, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%34 = amdaie.dma_cpy_nd(%6[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %26[%c0, %c0, %c0, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%38 = amdaie.dma_cpy_nd(%3[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %13[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%39 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %7[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_10 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_10, %subview_11 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_12 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%40 = arith.muli %in, %in_13 : i32
%41 = arith.addi %out, %40 : i32
linalg.yield %41 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%c6 = arith.constant 6 : index
%c0_9 = arith.constant 0 : index
scf.for %arg2 = %c0_9 to %c6 step %c1 {
%38 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%39 = affine.apply affine_map<(d0) -> (d0 * 32)>(%38)
%40 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %23[%c0, %c0, %32, %39] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%41 = affine.apply affine_map<(d0) -> (d0 * 32)>(%38)
%42 = amdaie.dma_cpy_nd(%8[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %27[%c0, %c0, %41, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg3, %arg4) in (2, 2) {
%43 = amdaie.dma_cpy_nd(%4[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %15[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%44 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %9[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_10 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_10 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_11 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_12: i32, %out: i32):
%45 = arith.muli %in, %in_12 : i32
%46 = arith.addi %out, %45 : i32
linalg.yield %46 : i32
}
}
}
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%35 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %24[%c0, %c0, %32, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%36 = amdaie.dma_cpy_nd(%10[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %28[%c0, %c0, %c224, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%38 = amdaie.dma_cpy_nd(%5[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %17[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%39 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %11[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_10 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_10 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_11 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_12: i32, %out: i32):
%41 = arith.muli %in, %in_12 : i32
%42 = arith.addi %out, %41 : i32
linalg.yield %42 : i32
}
}
}
}
%40 = amdaie.dma_cpy_nd(%19[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %18[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>)
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%37 = amdaie.dma_cpy_nd(%30[%32, %31] [%c64, %c64] [%c128, %c1], %20[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIELocalizeLogicalObjectfifo (iree-amdaie-localize-logicalobjectfifo) //----- //
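// Annotation: the amdaie.tile / amdaie.core ops introduced by AMDAIEInsertCores are
// now visible below, together with the consume/produce markers on the DMA results.
// The next pass, AMDAIELocalizeLogicalObjectfifo, appears to move the
// amdaie.logicalobjectfifo.from_memref ops from function scope into the scf.forall
// that uses them, as the following dump ("IR Dump Before CSE") shows (%21 through
// %24 there).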
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%0 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%1 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%2 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%3 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%4 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%5 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%6 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%7 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%8 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%9 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%10 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%11 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%12 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%13 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%14 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%16 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%17 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%18 = amdaie.logicalobjectfifo.from_memref %alloc_3, {} : memref<2x2x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%19 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%20 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%21 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%22 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%23 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%24 = amdaie.logicalobjectfifo.from_memref %21, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %21, 64 : memref<128x256xi32>
%25 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%26 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%27 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%28 = amdaie.logicalobjectfifo.from_memref %25, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %25, 64 : memref<256x128xi32>
%29 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%30 = amdaie.logicalobjectfifo.from_memref %29, {} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %29, 64 : memref<128x128xi32>
%c2_5 = arith.constant 2 : index
%c1_6 = arith.constant 1 : index
%c2_7 = arith.constant 2 : index
%c1_8 = arith.constant 1 : index
scf.forall (%arg0, %arg1) in (2, 2) {
%31 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%32 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%33 = amdaie.dma_cpy_nd(%12[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %22[%c0, %c0, %32, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%34 = amdaie.dma_cpy_nd(%6[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %26[%c0, %c0, %c0, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%38 = amdaie.dma_cpy_nd(%3[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %13[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%39 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %7[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%c2_10 = arith.constant 2 : index
%40 = arith.addi %arg2, %c2_10 : index
%tile = amdaie.tile(%arg3, %40)
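        // Annotation: the core is placed at column %arg3 and row %arg2 + 2, assuming
        // amdaie.tile takes (column, row); the +2 row offset presumably skips the
        // shim and memory-tile rows of the AIE array, an assumption not stated in
        // this dump.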
%41 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%38)
amdaie.logicalobjectfifo.consume(%39)
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_11 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_11, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%42 = arith.muli %in, %in_14 : i32
%43 = arith.addi %out, %42 : i32
linalg.yield %43 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%c6 = arith.constant 6 : index
%c0_9 = arith.constant 0 : index
scf.for %arg2 = %c0_9 to %c6 step %c1 {
%38 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%39 = affine.apply affine_map<(d0) -> (d0 * 32)>(%38)
%40 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %23[%c0, %c0, %32, %39] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%41 = affine.apply affine_map<(d0) -> (d0 * 32)>(%38)
%42 = amdaie.dma_cpy_nd(%8[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %27[%c0, %c0, %41, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg3, %arg4) in (2, 2) {
%43 = amdaie.dma_cpy_nd(%4[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %15[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%44 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %9[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%c2_10 = arith.constant 2 : index
%45 = arith.addi %arg3, %c2_10 : index
%tile = amdaie.tile(%arg4, %45)
%46 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%43)
amdaie.logicalobjectfifo.consume(%44)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_11 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_12 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%47 = arith.muli %in, %in_13 : i32
%48 = arith.addi %out, %47 : i32
linalg.yield %48 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%35 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %24[%c0, %c0, %32, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%36 = amdaie.dma_cpy_nd(%10[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %28[%c0, %c0, %c224, %31] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg2, %arg3) in (2, 2) {
%38 = amdaie.dma_cpy_nd(%5[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %17[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%39 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %11[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%40 = amdaie.dma_cpy_nd(%19[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %18[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>)
%c2_10 = arith.constant 2 : index
%41 = arith.addi %arg2, %c2_10 : index
%tile = amdaie.tile(%arg3, %41)
%42 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%38)
amdaie.logicalobjectfifo.consume(%39)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_11 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_12 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%43 = arith.muli %in, %in_13 : i32
%44 = arith.addi %out, %43 : i32
linalg.yield %44 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%40)
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%37 = amdaie.dma_cpy_nd(%30[%32, %31] [%c64, %c64] [%c128, %c1], %20[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
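// Annotation: in the module below the amdaie.logicalobjectfifo.from_memref ops for
// the core-local buffers have been localized into the block-level scf.forall (%21
// through %24). The CSE pass that runs next should then merge the remaining
// identical side-effect-free ops, e.g. the duplicated index constants (%c2_5,
// %c1_6, %c2_7, %c1_8) and the repeated from_memref values over the same alloc
// such as %0, %1 and %2, assuming those ops are CSE-able.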
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c7 = arith.constant 7 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%1 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%2 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%3 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%4 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%5 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%6 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%7 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%8 = amdaie.logicalobjectfifo.from_memref %7, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%9 = amdaie.logicalobjectfifo.from_memref %7, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
%10 = amdaie.logicalobjectfifo.from_memref %7, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %7, 64 : memref<128x256xi32>
%11 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%12 = amdaie.logicalobjectfifo.from_memref %11, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%13 = amdaie.logicalobjectfifo.from_memref %11, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
%14 = amdaie.logicalobjectfifo.from_memref %11, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %11, 64 : memref<256x128xi32>
%15 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%16 = amdaie.logicalobjectfifo.from_memref %15, {} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %15, 64 : memref<128x128xi32>
%c2_5 = arith.constant 2 : index
%c1_6 = arith.constant 1 : index
%c2_7 = arith.constant 2 : index
%c1_8 = arith.constant 1 : index
scf.forall (%arg0, %arg1) in (2, 2) {
%17 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%18 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
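      // amdaie.dma_cpy_nd operands read as dst[offsets][sizes][strides],
      // src[offsets][sizes][strides], in element units: e.g. below, sizes
      // [2, 1, 32, 32] match the 2x1x32x32 destination and %c256 is the row
      // stride of the 128x256 source.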
%19 = amdaie.dma_cpy_nd(%3[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %8[%c0, %c0, %18, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%20 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %12[%c0, %c0, %c0, %17] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%21 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%22 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%23 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%24 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
scf.forall (%arg2, %arg3) in (2, 2) {
%34 = amdaie.dma_cpy_nd(%22[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %24[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%35 = amdaie.dma_cpy_nd(%21[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %23[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%c2_10 = arith.constant 2 : index
%36 = arith.addi %arg2, %c2_10 : index
%tile = amdaie.tile(%arg3, %36)
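        // This core is placed at column %arg3, row %arg2 + 2: the compute
        // cores occupy tile rows 2 and 3.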
%37 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%34)
amdaie.logicalobjectfifo.consume(%35)
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_11 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
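                // Innermost micro-kernel: a packed 4x8 * 8x4 -> 4x4 i32
                // multiply-accumulate into %subview_13.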
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_11, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%38 = arith.muli %in, %in_14 : i32
%39 = arith.addi %out, %38 : i32
linalg.yield %39 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%c6 = arith.constant 6 : index
%c0_9 = arith.constant 0 : index
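      // K = 256 is consumed in 8 steps of 32 columns: the first step (with the
      // linalg.fill) is peeled above, the last (offset %c224 = 7 * 32) is
      // peeled below, and this loop handles the middle 6 via %arg2 + 1.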
scf.for %arg2 = %c0_9 to %c6 step %c1 {
%34 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%35 = affine.apply affine_map<(d0) -> (d0 * 32)>(%34)
%36 = amdaie.dma_cpy_nd(%4[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %9[%c0, %c0, %18, %35] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%37 = affine.apply affine_map<(d0) -> (d0 * 32)>(%34)
%38 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %13[%c0, %c0, %37, %17] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%39 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%40 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%41 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%42 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
scf.forall (%arg3, %arg4) in (2, 2) {
%43 = amdaie.dma_cpy_nd(%40[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %42[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%44 = amdaie.dma_cpy_nd(%39[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %41[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%c2_10 = arith.constant 2 : index
%45 = arith.addi %arg3, %c2_10 : index
%tile = amdaie.tile(%arg4, %45)
%46 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%43)
amdaie.logicalobjectfifo.consume(%44)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_11 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_12 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%47 = arith.muli %in, %in_13 : i32
%48 = arith.addi %out, %47 : i32
linalg.yield %48 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%25 = amdaie.dma_cpy_nd(%5[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %10[%c0, %c0, %18, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%26 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %14[%c0, %c0, %c224, %17] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%27 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%28 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%29 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%30 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%31 = amdaie.logicalobjectfifo.from_memref %alloc_3, {} : memref<2x2x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>
%32 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
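      // Last K-step: besides consuming the final A/B tiles, each core's
      // accumulator is unpacked from 2x2x8x8x4x4 back to the 2x2x32x32 L2
      // buffer (%36, produced at the end of the core region) ahead of the
      // write-back DMA %33.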
scf.forall (%arg2, %arg3) in (2, 2) {
%34 = amdaie.dma_cpy_nd(%28[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %30[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%35 = amdaie.dma_cpy_nd(%27[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %29[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%36 = amdaie.dma_cpy_nd(%32[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %31[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>)
%c2_10 = arith.constant 2 : index
%37 = arith.addi %arg2, %c2_10 : index
%tile = amdaie.tile(%arg3, %37)
%38 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%34)
amdaie.logicalobjectfifo.consume(%35)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_11 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_11 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_12 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_13: i32, %out: i32):
%39 = arith.muli %in, %in_13 : i32
%40 = arith.addi %out, %39 : i32
linalg.yield %40 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%36)
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%33 = amdaie.dma_cpy_nd(%16[%18, %17] [%c64, %c64] [%c128, %c1], %6[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before AMDAIEDistributeCoresAndObjectFifos (iree-amdaie-distribute-cores-and-objectfifos) //----- //
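// This pass is expected to unroll the thread-level scf.foralls into per-tile
// amdaie.core regions with explicit amdaie.tile placements and per-tile
// logical objectFifos; compare the pre-CSE dump that follows.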
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%1 = amdaie.logicalobjectfifo.from_memref %alloc_2, {} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x2x8x8x4x4xi32, 2 : i32>
%alloc_4 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%2 = amdaie.logicalobjectfifo.from_memref %alloc_4, {} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%3 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%4 = amdaie.logicalobjectfifo.from_memref %3, {} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %3, 64 : memref<128x256xi32>
%5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%6 = amdaie.logicalobjectfifo.from_memref %5, {} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %5, 64 : memref<256x128xi32>
%7 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%8 = amdaie.logicalobjectfifo.from_memref %7, {} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %7, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) in (2, 2) {
%9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%10 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%11 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%12 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c0, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%13 = amdaie.logicalobjectfifo.from_memref %alloc, {} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%14 = amdaie.logicalobjectfifo.from_memref %alloc_0, {} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
scf.forall (%arg2, %arg3) in (2, 2) {
%19 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%20 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%subview = memref.subview %alloc_3[%arg2, %arg3, 0, 0, 0, 0] [1, 1, 8, 8, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
%21 = arith.addi %arg2, %c2 : index
%tile = amdaie.tile(%arg3, %21)
%22 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%20)
linalg.fill ins(%c0_i32 : i32) outs(%subview : memref<1x1x8x8x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview_5 = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_7 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview_5, %subview_6 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_7 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_8: i32, %out: i32):
%23 = arith.muli %in, %in_8 : i32
%24 = arith.addi %out, %23 : i32
linalg.yield %24 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%c6 = arith.constant 6 : index
scf.for %arg2 = %c0 to %c6 step %c1 {
%19 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%20 = affine.apply affine_map<(d0) -> (d0 * 32)>(%19)
%21 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %20] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%22 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %20, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
scf.forall (%arg3, %arg4) in (2, 2) {
%23 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%arg3, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%24 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %arg4, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%25 = arith.addi %arg3, %c2 : index
%tile = amdaie.tile(%arg4, %25)
%26 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%23)
amdaie.logicalobjectfifo.consume(%24)
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c8 step %c1 {
scf.for %arg7 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg7, %arg5, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg6, %arg7, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg3, %arg4, %arg6, %arg5, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%27 = arith.muli %in, %in_7 : i32
%28 = arith.addi %out, %27 : i32
linalg.yield %28 : i32
}
}
}
}
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
}
%15 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%16 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c224, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%17 = amdaie.logicalobjectfifo.from_memref %alloc_3, {} : memref<2x2x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>
scf.forall (%arg2, %arg3) in (2, 2) {
%19 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%arg2, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%20 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%21 = amdaie.dma_cpy_nd(%2[%arg2, %arg3, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %17[%arg2, %arg3, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<2x2x8x8x4x4xi32, 2 : i32>>)
%22 = arith.addi %arg2, %c2 : index
%tile = amdaie.tile(%arg3, %22)
%23 = amdaie.core(%tile) {
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%20)
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c8 step %c1 {
scf.for %arg6 = %c0 to %c4 step %c1 {
%subview = memref.subview %alloc_0[0, 0, %arg6, %arg4, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_5 = memref.subview %alloc[0, 0, %arg5, %arg6, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_6 = memref.subview %alloc_3[%arg2, %arg3, %arg5, %arg4, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<2x2x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_5 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_6 : memref<1x1x1x1x4x4xi32, strided<[2048, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_7: i32, %out: i32):
%24 = arith.muli %in, %in_7 : i32
%25 = arith.addi %out, %24 : i32
linalg.yield %25 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%21)
amdaie.end
}
} {mapping = [#gpu.thread<y>, #gpu.thread<x>]}
%18 = amdaie.dma_cpy_nd(%8[%10, %9] [%c64, %c64] [%c128, %c1], %2[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_4 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x2x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
return
}
}
// -----// IR Dump Before CSE (cse) //----- //
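// Input to CSE: distribution left many duplicate ops (e.g. %tile_3, %tile_6,
// and %tile_11 below are all amdaie.tile(%c1, %c1)) that CSE should fold.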
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c3 = arith.constant 3 : index
%c6 = arith.constant 6 : index
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%tile = amdaie.tile(%c0, %c1)
%tile_3 = amdaie.tile(%c1, %c1)
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {%tile} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_4 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_5 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%tile_6 = amdaie.tile(%c1, %c1)
%1 = amdaie.logicalobjectfifo.from_memref %alloc_4, {%tile_6} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_7 = memref.alloc() : memref<1x1x8x8x4x4xi32, 2 : i32>
%alloc_8 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%alloc_9 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%tile_10 = amdaie.tile(%c0, %c1)
%tile_11 = amdaie.tile(%c1, %c1)
%2 = amdaie.logicalobjectfifo.from_memref %alloc_8, {%tile_10} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%3 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%tile_12 = amdaie.tile(%c1, %c0)
%4 = amdaie.logicalobjectfifo.from_memref %3, {%tile_12} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %3, 64 : memref<128x256xi32>
%5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%tile_13 = amdaie.tile(%c0, %c0)
%tile_14 = amdaie.tile(%c1, %c0)
%6 = amdaie.logicalobjectfifo.from_memref %5, {%tile_13} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %5, 64 : memref<256x128xi32>
%7 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%tile_15 = amdaie.tile(%c0, %c0)
%tile_16 = amdaie.tile(%c1, %c0)
%8 = amdaie.logicalobjectfifo.from_memref %7, {%tile_15} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %7, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) in (2, 2) {
%9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%10 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%11 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%12 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c0, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%tile_17 = amdaie.tile(%c1, %c3)
%13 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_17} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_18 = amdaie.tile(%c1, %c3)
%14 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_18} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_19 = amdaie.tile(%c1, %c3)
%15 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_19} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_20 = amdaie.tile(%c0, %c3)
%16 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_20} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_21 = amdaie.tile(%c1, %c3)
%17 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_21} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_22 = amdaie.tile(%c1, %c3)
%18 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_22} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_23 = amdaie.tile(%c1, %c3)
%19 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_23} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_24 = amdaie.tile(%c1, %c2)
%20 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_24} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%21 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%22 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%23 = amdaie.dma_cpy_nd(%20[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_25 = amdaie.tile(%c0, %c2)
%tile_26 = amdaie.tile(%c0, %c2)
%24 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_26} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_27 = amdaie.tile(%c0, %c2)
%25 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_27} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_28 = amdaie.tile(%c0, %c2)
%26 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_28} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_29 = amdaie.tile(%c0, %c2)
%27 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_29} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%28 = amdaie.core(%tile_25) {
%76 = amdaie.logicalobjectfifo.access(%27, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%26, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%79 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%23)
amdaie.logicalobjectfifo.consume(%21)
linalg.fill ins(%c0_i32 : i32) outs(%79 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c0, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%80 = arith.muli %in, %in_67 : i32
%81 = arith.addi %out, %80 : i32
linalg.yield %81 : i32
}
}
}
}
amdaie.end
}
%tile_30 = amdaie.tile(%c1, %c2)
%tile_31 = amdaie.tile(%c1, %c2)
%29 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_31} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_32 = amdaie.tile(%c1, %c2)
%30 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_32} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_33 = amdaie.tile(%c1, %c2)
%31 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_33} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_34 = amdaie.tile(%c1, %c2)
%32 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_34} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%33 = amdaie.core(%tile_30) {
%76 = amdaie.logicalobjectfifo.access(%32, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%30, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%79 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%23)
amdaie.logicalobjectfifo.consume(%22)
linalg.fill ins(%c0_i32 : i32) outs(%79 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c0, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%80 = arith.muli %in, %in_67 : i32
%81 = arith.addi %out, %80 : i32
linalg.yield %81 : i32
}
}
}
}
amdaie.end
}
%34 = amdaie.dma_cpy_nd(%19[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_35 = amdaie.tile(%c0, %c3)
%tile_36 = amdaie.tile(%c0, %c3)
%35 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_36} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_37 = amdaie.tile(%c0, %c3)
%36 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_37} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_38 = amdaie.tile(%c0, %c3)
%37 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_38} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_39 = amdaie.tile(%c0, %c3)
%38 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_39} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%39 = amdaie.core(%tile_35) {
%76 = amdaie.logicalobjectfifo.access(%38, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%37, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%36, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%79 = amdaie.logicalobjectfifo.access(%35, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%34)
amdaie.logicalobjectfifo.consume(%21)
linalg.fill ins(%c0_i32 : i32) outs(%79 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c1, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%80 = arith.muli %in, %in_67 : i32
%81 = arith.addi %out, %80 : i32
linalg.yield %81 : i32
}
}
}
}
amdaie.end
}
%tile_40 = amdaie.tile(%c1, %c3)
%tile_41 = amdaie.tile(%c1, %c3)
%40 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_41} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_42 = amdaie.tile(%c1, %c3)
%41 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_42} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_43 = amdaie.tile(%c1, %c3)
%42 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_43} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_44 = amdaie.tile(%c1, %c3)
%43 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_44} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%44 = amdaie.core(%tile_40) {
%76 = amdaie.logicalobjectfifo.access(%43, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%42, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%41, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%79 = amdaie.logicalobjectfifo.access(%40, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%34)
amdaie.logicalobjectfifo.consume(%22)
linalg.fill ins(%c0_i32 : i32) outs(%79 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c1, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%80 = arith.muli %in, %in_67 : i32
%81 = arith.addi %out, %80 : i32
linalg.yield %81 : i32
}
}
}
}
amdaie.end
}
scf.for %arg2 = %c0 to %c6 step %c1 {
%76 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%77 = affine.apply affine_map<(d0) -> (d0 * 32)>(%76)
%78 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %77] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%79 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %77, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%80 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%81 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%82 = amdaie.dma_cpy_nd(%20[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_65 = amdaie.tile(%c0, %c2)
%tile_66 = amdaie.tile(%c0, %c2)
%83 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_66} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_67 = amdaie.tile(%c0, %c2)
%84 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_67} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_68 = amdaie.tile(%c0, %c2)
%85 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_68} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%86 = amdaie.core(%tile_65) {
%100 = amdaie.logicalobjectfifo.access(%85, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%101 = amdaie.logicalobjectfifo.access(%84, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%102 = amdaie.logicalobjectfifo.access(%83, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%82)
amdaie.logicalobjectfifo.consume(%80)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %102[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_81 = memref.subview %101[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_82 = memref.subview %100[%c0, %c0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_81 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_82 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_83: i32, %out: i32):
%103 = arith.muli %in, %in_83 : i32
%104 = arith.addi %out, %103 : i32
linalg.yield %104 : i32
}
}
}
}
amdaie.end
}
%tile_69 = amdaie.tile(%c1, %c2)
%tile_70 = amdaie.tile(%c1, %c2)
%87 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_70} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_71 = amdaie.tile(%c1, %c2)
%88 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_71} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_72 = amdaie.tile(%c1, %c2)
%89 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_72} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%90 = amdaie.core(%tile_69) {
%100 = amdaie.logicalobjectfifo.access(%89, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%101 = amdaie.logicalobjectfifo.access(%88, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%102 = amdaie.logicalobjectfifo.access(%87, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%82)
amdaie.logicalobjectfifo.consume(%81)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %102[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_81 = memref.subview %101[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_82 = memref.subview %100[%c0, %c1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_81 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_82 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_83: i32, %out: i32):
%103 = arith.muli %in, %in_83 : i32
%104 = arith.addi %out, %103 : i32
linalg.yield %104 : i32
}
}
}
}
amdaie.end
}
%91 = amdaie.dma_cpy_nd(%18[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_73 = amdaie.tile(%c0, %c3)
%tile_74 = amdaie.tile(%c0, %c3)
%92 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_74} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_75 = amdaie.tile(%c0, %c3)
%93 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_75} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_76 = amdaie.tile(%c0, %c3)
%94 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_76} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%95 = amdaie.core(%tile_73) {
%100 = amdaie.logicalobjectfifo.access(%94, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%101 = amdaie.logicalobjectfifo.access(%93, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%102 = amdaie.logicalobjectfifo.access(%92, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%91)
amdaie.logicalobjectfifo.consume(%80)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %102[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_81 = memref.subview %101[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_82 = memref.subview %100[%c1, %c0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_81 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_82 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_83: i32, %out: i32):
%103 = arith.muli %in, %in_83 : i32
%104 = arith.addi %out, %103 : i32
linalg.yield %104 : i32
}
}
}
}
amdaie.end
}
%tile_77 = amdaie.tile(%c1, %c3)
%tile_78 = amdaie.tile(%c1, %c3)
%96 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_78} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_79 = amdaie.tile(%c1, %c3)
%97 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_79} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_80 = amdaie.tile(%c1, %c3)
%98 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_80} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%99 = amdaie.core(%tile_77) {
%100 = amdaie.logicalobjectfifo.access(%98, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%101 = amdaie.logicalobjectfifo.access(%97, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%102 = amdaie.logicalobjectfifo.access(%96, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%91)
amdaie.logicalobjectfifo.consume(%81)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %102[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_81 = memref.subview %101[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_82 = memref.subview %100[%c1, %c1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_81 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_82 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_83: i32, %out: i32):
%103 = arith.muli %in, %in_83 : i32
%104 = arith.addi %out, %103 : i32
linalg.yield %104 : i32
}
}
}
}
amdaie.end
}
}
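// End of the K-tile loop body above. The ops below are the epilogue peel for the
// final K tile at offset 224 (= 7 * 32); unlike the loop body, these cores also
// produce their finished 32x32 quadrants into the 2x2x32x32 L2 accumulator %2.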
%45 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%46 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c224, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%tile_45 = amdaie.tile(%c1, %c3)
%47 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_45} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_46 = amdaie.tile(%c1, %c2)
%48 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_46} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_47 = amdaie.tile(%c0, %c3)
%49 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_47} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%tile_48 = amdaie.tile(%c0, %c2)
%50 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_48} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%51 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%52 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%53 = amdaie.dma_cpy_nd(%20[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%54 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %50[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%tile_49 = amdaie.tile(%c0, %c2)
%tile_50 = amdaie.tile(%c0, %c2)
%55 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_50} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_51 = amdaie.tile(%c0, %c2)
%56 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_51} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_52 = amdaie.tile(%c0, %c2)
%57 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_52} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%58 = amdaie.core(%tile_49) {
%76 = amdaie.logicalobjectfifo.access(%57, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%56, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%55, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%53)
amdaie.logicalobjectfifo.consume(%51)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c0, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%79 = arith.muli %in, %in_67 : i32
%80 = arith.addi %out, %79 : i32
linalg.yield %80 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%54)
amdaie.end
}
%59 = amdaie.dma_cpy_nd(%2[%c0, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %48[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%tile_53 = amdaie.tile(%c1, %c2)
%tile_54 = amdaie.tile(%c1, %c2)
%60 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_54} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_55 = amdaie.tile(%c1, %c2)
%61 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_55} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_56 = amdaie.tile(%c1, %c2)
%62 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_56} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%63 = amdaie.core(%tile_53) {
%76 = amdaie.logicalobjectfifo.access(%62, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%61, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%60, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%53)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c0, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%79 = arith.muli %in, %in_67 : i32
%80 = arith.addi %out, %79 : i32
linalg.yield %80 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%59)
amdaie.end
}
%64 = amdaie.dma_cpy_nd(%17[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%65 = amdaie.dma_cpy_nd(%2[%c1, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %49[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%tile_57 = amdaie.tile(%c0, %c3)
%tile_58 = amdaie.tile(%c0, %c3)
%66 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_58} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_59 = amdaie.tile(%c0, %c3)
%67 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_59} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_60 = amdaie.tile(%c0, %c3)
%68 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_60} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%69 = amdaie.core(%tile_57) {
%76 = amdaie.logicalobjectfifo.access(%68, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%67, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%66, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%64)
amdaie.logicalobjectfifo.consume(%51)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c1, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%79 = arith.muli %in, %in_67 : i32
%80 = arith.addi %out, %79 : i32
linalg.yield %80 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%65)
amdaie.end
}
%70 = amdaie.dma_cpy_nd(%2[%c1, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %47[%c1, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%tile_61 = amdaie.tile(%c1, %c3)
%tile_62 = amdaie.tile(%c1, %c3)
%71 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_62} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_63 = amdaie.tile(%c1, %c3)
%72 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_63} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_64 = amdaie.tile(%c1, %c3)
%73 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile_64} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%74 = amdaie.core(%tile_61) {
%76 = amdaie.logicalobjectfifo.access(%73, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%77 = amdaie.logicalobjectfifo.access(%72, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%78 = amdaie.logicalobjectfifo.access(%71, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%64)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %78[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_65 = memref.subview %77[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_66 = memref.subview %76[%c1, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_65 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_66 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_67: i32, %out: i32):
%79 = arith.muli %in, %in_67 : i32
%80 = arith.addi %out, %79 : i32
linalg.yield %80 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%70)
amdaie.end
}
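// Final writeback: read the 2x2x32x32 L2 accumulator with sizes [2, 32, 2, 32]
// and strides [2048, 32, 1024, 1], which interleaves the four 32x32 quadrants
// into a row-major 64x64 block of the 128x128 result at offsets (%10, %9).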
%75 = amdaie.dma_cpy_nd(%8[%10, %9] [%c64, %c64] [%c128, %c1], %2[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_9 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
memref.dealloc %alloc_7 : memref<1x1x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_4 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_8 : memref<2x2x32x32xi32, 1 : i32>
return
}
}
// -----// IR Dump Before Canonicalizer (canonicalize) //----- //
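// Note: the module below is the input to the pass; MLIR's canonicalizer greedily
// applies each op's registered canonicalization patterns plus constant folding.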
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c3 = arith.constant 3 : index
%c6 = arith.constant 6 : index
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
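// Constant pool for the loop bounds and DMA strides, e.g. %c8192 = 32 * 256 (one
// 32-row block of the 128x256 A matrix), %c4096 = 32 * 128 (one 32-row block of
// the 256x128 B matrix), %c224 = 7 * 32 (offset of the last of eight K tiles).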
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%alloc_2 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%tile = amdaie.tile(%c0, %c1)
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {%tile} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_3 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%alloc_4 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%tile_5 = amdaie.tile(%c1, %c1)
%1 = amdaie.logicalobjectfifo.from_memref %alloc_3, {%tile_5} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_6 = memref.alloc() : memref<1x1x8x8x4x4xi32, 2 : i32>
%alloc_7 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%alloc_8 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%2 = amdaie.logicalobjectfifo.from_memref %alloc_7, {%tile} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%3 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%tile_9 = amdaie.tile(%c1, %c0)
%4 = amdaie.logicalobjectfifo.from_memref %3, {%tile_9} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %3, 64 : memref<128x256xi32>
%5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%tile_10 = amdaie.tile(%c0, %c0)
%6 = amdaie.logicalobjectfifo.from_memref %5, {%tile_10} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %5, 64 : memref<256x128xi32>
%7 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%8 = amdaie.logicalobjectfifo.from_memref %7, {%tile_10} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %7, 64 : memref<128x128xi32>
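// The three HAL bindings (A: 128x256, B: 256x128, C: 128x128) are wrapped as
// logical objectfifos anchored on row-0 tiles (0,0) and (1,0), i.e. the shim row
// that fronts external memory.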
scf.forall (%arg0, %arg1) in (2, 2) {
%9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%10 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
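// Workgroup (%arg0, %arg1) of the 2x2 forall (mapped to GPU blocks y/x) owns the
// 64x64 output block at row offset %10 = 64 * %arg0, column offset %9 = 64 * %arg1.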
%11 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%12 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c0, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%tile_11 = amdaie.tile(%c1, %c3)
%13 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_11} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_12 = amdaie.tile(%c0, %c3)
%14 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_12} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_11} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_13 = amdaie.tile(%c1, %c2)
%16 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_13} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%17 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%18 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%19 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_14 = amdaie.tile(%c0, %c2)
%20 = amdaie.logicalobjectfifo.from_memref %alloc_6, {%tile_14} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%21 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_14} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%22 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_14} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%23 = amdaie.core(%tile_14) {
%48 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%17)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
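// The accumulator is zero-initialized only in the four peeled k = 0 cores (here
// and below); the cores in the steady-state K loop and in the epilogue accumulate
// into it in place.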
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c0, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%52 = arith.muli %in, %in_17 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%24 = amdaie.logicalobjectfifo.from_memref %alloc_6, {%tile_13} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%25 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_13} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%26 = amdaie.core(%tile_13) {
%48 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%18)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c0, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%52 = arith.muli %in, %in_17 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%27 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%28 = amdaie.logicalobjectfifo.from_memref %alloc_6, {%tile_12} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%29 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_12} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%30 = amdaie.core(%tile_12) {
%48 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%27)
amdaie.logicalobjectfifo.consume(%17)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c1, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%52 = arith.muli %in, %in_17 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%31 = amdaie.logicalobjectfifo.from_memref %alloc_6, {%tile_11} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%32 = amdaie.core(%tile_11) {
%48 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%27)
amdaie.logicalobjectfifo.consume(%18)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c1, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%52 = arith.muli %in, %in_17 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
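// Steady-state loop over the six middle K tiles: %arg2 = 0..5 maps to k = 1..6
// via (d0 + 1) * 32, i.e. column/row offsets 32..192. k = 0 was peeled above
// (with the linalg.fill) and k = 7 (offset 224) is peeled below as the epilogue.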
scf.for %arg2 = %c0 to %c6 step %c1 {
%48 = affine.apply affine_map<(d0) -> (d0 + 1)>(%arg2)
%49 = affine.apply affine_map<(d0) -> (d0 * 32)>(%48)
%50 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %49] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%51 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %49, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%52 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%53 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%54 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%55 = amdaie.core(%tile_14) {
%60 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%62 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%54)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %62[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %61[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %60[%c0, %c0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%63 = arith.muli %in, %in_17 : i32
%64 = arith.addi %out, %63 : i32
linalg.yield %64 : i32
}
}
}
}
amdaie.end
}
%56 = amdaie.core(%tile_13) {
%60 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%62 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%54)
amdaie.logicalobjectfifo.consume(%53)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %62[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %61[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %60[%c0, %c1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%63 = arith.muli %in, %in_17 : i32
%64 = arith.addi %out, %63 : i32
linalg.yield %64 : i32
}
}
}
}
amdaie.end
}
%57 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%58 = amdaie.core(%tile_12) {
%60 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%62 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%57)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %62[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %61[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %60[%c1, %c0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%63 = arith.muli %in, %in_17 : i32
%64 = arith.addi %out, %63 : i32
linalg.yield %64 : i32
}
}
}
}
amdaie.end
}
%59 = amdaie.core(%tile_11) {
%60 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%62 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%57)
amdaie.logicalobjectfifo.consume(%53)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %62[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %61[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %60[%c1, %c1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%63 = arith.muli %in, %in_17 : i32
%64 = arith.addi %out, %63 : i32
linalg.yield %64 : i32
}
}
}
}
amdaie.end
}
}
%33 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%34 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c224, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%35 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%36 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%37 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%38 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %20[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%39 = amdaie.core(%tile_14) {
%48 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%37)
amdaie.logicalobjectfifo.consume(%35)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c0, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%51 = arith.muli %in, %in_17 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%38)
amdaie.end
}
%40 = amdaie.dma_cpy_nd(%2[%c0, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %24[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%41 = amdaie.core(%tile_13) {
%48 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%37)
amdaie.logicalobjectfifo.consume(%36)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c0, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%51 = arith.muli %in, %in_17 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%40)
amdaie.end
}
%42 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%43 = amdaie.dma_cpy_nd(%2[%c1, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %28[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%44 = amdaie.core(%tile_12) {
%48 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%42)
amdaie.logicalobjectfifo.consume(%35)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c1, %c0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%51 = arith.muli %in, %in_17 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%43)
amdaie.end
}
%45 = amdaie.dma_cpy_nd(%2[%c1, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %31[%c1, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%46 = amdaie.core(%tile_11) {
%48 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%42)
amdaie.logicalobjectfifo.consume(%36)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_15 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_16 = memref.subview %48[%c1, %c1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_15 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_16 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_17: i32, %out: i32):
%51 = arith.muli %in, %in_17 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%45)
amdaie.end
}
%47 = amdaie.dma_cpy_nd(%8[%10, %9] [%c64, %c64] [%c128, %c1], %2[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
memref.dealloc %alloc_8 : memref<2x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_4 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
memref.dealloc %alloc_6 : memref<1x1x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_3 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_7 : memref<2x2x32x32xi32, 1 : i32>
return
}
}
// -----// IR Dump Before AMDAIEDmaToCircularDma (iree-amdaie-dma-to-circular-dma) //----- //
module {
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c3 = arith.constant 3 : index
%c6 = arith.constant 6 : index
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%tile = amdaie.tile(%c0, %c1)
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {%tile} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%tile_3 = amdaie.tile(%c1, %c1)
%1 = amdaie.logicalobjectfifo.from_memref %alloc_2, {%tile_3} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_4 = memref.alloc() : memref<1x1x8x8x4x4xi32, 2 : i32>
%alloc_5 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%2 = amdaie.logicalobjectfifo.from_memref %alloc_5, {%tile} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%3 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%tile_6 = amdaie.tile(%c1, %c0)
%4 = amdaie.logicalobjectfifo.from_memref %3, {%tile_6} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %3, 64 : memref<128x256xi32>
%5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%tile_7 = amdaie.tile(%c0, %c0)
%6 = amdaie.logicalobjectfifo.from_memref %5, {%tile_7} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %5, 64 : memref<256x128xi32>
%7 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%8 = amdaie.logicalobjectfifo.from_memref %7, {%tile_7} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %7, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) in (2, 2) {
%9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%10 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%11 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%12 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c0, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%tile_8 = amdaie.tile(%c1, %c3)
%13 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_8} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_9 = amdaie.tile(%c0, %c3)
%14 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_9} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_8} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_10 = amdaie.tile(%c1, %c2)
%16 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_10} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%17 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%18 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%19 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%tile_11 = amdaie.tile(%c0, %c2)
%20 = amdaie.logicalobjectfifo.from_memref %alloc_4, {%tile_11} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%21 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_11} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%22 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_11} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%23 = amdaie.core(%tile_11) {
%48 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%17)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[0, 0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%52 = arith.muli %in, %in_14 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%24 = amdaie.logicalobjectfifo.from_memref %alloc_4, {%tile_10} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%25 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_10} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%26 = amdaie.core(%tile_10) {
%48 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%19)
amdaie.logicalobjectfifo.consume(%18)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[0, 1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%52 = arith.muli %in, %in_14 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%27 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%28 = amdaie.logicalobjectfifo.from_memref %alloc_4, {%tile_9} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%29 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_9} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%30 = amdaie.core(%tile_9) {
%48 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%27)
amdaie.logicalobjectfifo.consume(%17)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[1, 0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%52 = arith.muli %in, %in_14 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
%31 = amdaie.logicalobjectfifo.from_memref %alloc_4, {%tile_8} : memref<1x1x8x8x4x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>
%32 = amdaie.core(%tile_8) {
%48 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
%51 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%27)
amdaie.logicalobjectfifo.consume(%18)
linalg.fill ins(%c0_i32 : i32) outs(%51 : memref<1x1x8x8x4x4xi32, 2 : i32>)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[1, 1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%52 = arith.muli %in, %in_14 : i32
%53 = arith.addi %out, %52 : i32
linalg.yield %53 : i32
}
}
}
}
amdaie.end
}
scf.for %arg2 = %c0 to %c6 step %c1 {
%48 = affine.apply affine_map<(d0) -> (d0 * 32 + 32)>(%arg2)
%49 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %48] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%50 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %48, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%51 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%52 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%53 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%54 = amdaie.core(%tile_11) {
%59 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%60 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%53)
amdaie.logicalobjectfifo.consume(%51)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %61[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %60[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %59[0, 0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%62 = arith.muli %in, %in_14 : i32
%63 = arith.addi %out, %62 : i32
linalg.yield %63 : i32
}
}
}
}
amdaie.end
}
%55 = amdaie.core(%tile_10) {
%59 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%60 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%53)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %61[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %60[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %59[0, 1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%62 = arith.muli %in, %in_14 : i32
%63 = arith.addi %out, %62 : i32
linalg.yield %63 : i32
}
}
}
}
amdaie.end
}
%56 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%57 = amdaie.core(%tile_9) {
%59 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%60 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%56)
amdaie.logicalobjectfifo.consume(%51)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %61[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %60[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %59[1, 0, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%62 = arith.muli %in, %in_14 : i32
%63 = arith.addi %out, %62 : i32
linalg.yield %63 : i32
}
}
}
}
amdaie.end
}
%58 = amdaie.core(%tile_8) {
%59 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%60 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%61 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%56)
amdaie.logicalobjectfifo.consume(%52)
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c8 step %c1 {
scf.for %arg5 = %c0 to %c4 step %c1 {
%subview = memref.subview %61[0, 0, %arg5, %arg3, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %60[0, 0, %arg4, %arg5, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %59[1, 1, %arg4, %arg3, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%62 = arith.muli %in, %in_14 : i32
%63 = arith.addi %out, %62 : i32
linalg.yield %63 : i32
}
}
}
}
amdaie.end
}
}
%33 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c224] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%34 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c224, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%35 = amdaie.dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%36 = amdaie.dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%37 = amdaie.dma_cpy_nd(%16[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%38 = amdaie.dma_cpy_nd(%2[%c0, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %20[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%39 = amdaie.core(%tile_11) {
%48 = amdaie.logicalobjectfifo.access(%20, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%22, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%21, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%37)
amdaie.logicalobjectfifo.consume(%35)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[0, 0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%51 = arith.muli %in, %in_14 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%38)
amdaie.end
}
%40 = amdaie.dma_cpy_nd(%2[%c0, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %24[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%41 = amdaie.core(%tile_10) {
%48 = amdaie.logicalobjectfifo.access(%24, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%25, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%16, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%37)
amdaie.logicalobjectfifo.consume(%36)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[0, 1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%51 = arith.muli %in, %in_14 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%40)
amdaie.end
}
%42 = amdaie.dma_cpy_nd(%15[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c256, %c32, %c8, %c1], %1[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c4, %c8, %c4, %c8] [%c1024, %c1024, %c8, %c128, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>)
%43 = amdaie.dma_cpy_nd(%2[%c1, %c0, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %28[%c1, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%44 = amdaie.core(%tile_9) {
%48 = amdaie.logicalobjectfifo.access(%28, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%14, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%29, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%42)
amdaie.logicalobjectfifo.consume(%35)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[1, 0, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%51 = arith.muli %in, %in_14 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%43)
amdaie.end
}
%45 = amdaie.dma_cpy_nd(%2[%c1, %c1, %c0, %c0] [%c1, %c1, %c32, %c32] [%c2048, %c1024, %c32, %c1], %31[%c1, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c16, %c4, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>>)
%46 = amdaie.core(%tile_8) {
%48 = amdaie.logicalobjectfifo.access(%31, None) : !amdaie.logicalobjectfifo<memref<1x1x8x8x4x4xi32, 2 : i32>> -> memref<1x1x8x8x4x4xi32, 2 : i32>
%49 = amdaie.logicalobjectfifo.access(%13, None) : !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>> -> memref<1x1x8x4x8x4xi32, 2 : i32>
%50 = amdaie.logicalobjectfifo.access(%15, None) : !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>> -> memref<1x1x4x8x4x8xi32, 2 : i32>
amdaie.logicalobjectfifo.consume(%42)
amdaie.logicalobjectfifo.consume(%36)
scf.for %arg2 = %c0 to %c8 step %c1 {
scf.for %arg3 = %c0 to %c8 step %c1 {
scf.for %arg4 = %c0 to %c4 step %c1 {
%subview = memref.subview %50[0, 0, %arg4, %arg2, 0, 0] [1, 1, 1, 1, 4, 8] [1, 1, 1, 1, 1, 1] : memref<1x1x4x8x4x8xi32, 2 : i32> to memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>
%subview_12 = memref.subview %49[0, 0, %arg3, %arg4, 0, 0] [1, 1, 1, 1, 8, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x4x8x4xi32, 2 : i32> to memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>
%subview_13 = memref.subview %48[1, 1, %arg3, %arg2, 0, 0] [1, 1, 1, 1, 4, 4] [1, 1, 1, 1, 1, 1] : memref<1x1x8x8x4x4xi32, 2 : i32> to memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>
linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d2, d5, d3, d6, d8)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d2, d1, d4, d5, d8, d7)>, affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d4, d3, d6, d7)>], iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel", "reduction", "parallel", "parallel", "reduction"]} ins(%subview, %subview_12 : memref<1x1x1x1x4x8xi32, strided<[1024, 1024, 256, 32, 8, 1], offset: ?>, 2 : i32>, memref<1x1x1x1x8x4xi32, strided<[1024, 1024, 128, 32, 4, 1], offset: ?>, 2 : i32>) outs(%subview_13 : memref<1x1x1x1x4x4xi32, strided<[1024, 1024, 128, 16, 4, 1], offset: ?>, 2 : i32>) attrs = {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64], [0, 0, 1], [1, 1, 0, 0, 0, 0]]>, packing_config = #amdaie.packing_config<packing_config = [{packedSizes = [32, 32, 32], transposePackIndices = [1], unpackEmpty = [false], innerPerm = [[1, 0]], outerPerm = [[0, 1]]}, {packedSizes = [0, 0, 0, 4, 4, 8], transposePackIndices = [0, 1, 2], unpackEmpty = [false, false, true], innerPerm = [[0, 1], [1, 0], [0, 1]], outerPerm = [[0, 1, 3, 2], [0, 1, 3, 2], [0, 1, 3, 2]]}]>} {
^bb0(%in: i32, %in_14: i32, %out: i32):
%51 = arith.muli %in, %in_14 : i32
%52 = arith.addi %out, %51 : i32
linalg.yield %52 : i32
}
}
}
}
amdaie.logicalobjectfifo.produce(%45)
amdaie.end
}
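// Note: the four amdaie.core regions above all run the same packed micro-kernel.
// The inner linalg.generic multiplies a 4x8 slice of A by an 8x4 slice of B and
// accumulates into a 4x4 slice of C, while the scf.for nest walks the 8x8x4 outer
// tiles of one 32x32x32 matmul. A rough NumPy model of that computation follows
// (an illustrative sketch assuming row-major packing per the memref shapes above;
// not part of the dump, and not the compiler's own code):

import numpy as np

# Packed operands, matching the L1 memref shapes in the cores above:
# A: [1,1,4,8,4,8] = (K-outer=4, M-outer=8, m=4, k=8)
# B: [1,1,8,4,8,4] = (N-outer=8, K-outer=4, k=8, n=4)
# C: [1,1,8,8,4,4] = (N-outer=8, M-outer=8, m=4, n=4)
A = np.random.randint(-8, 8, size=(1, 1, 4, 8, 4, 8), dtype=np.int32)
B = np.random.randint(-8, 8, size=(1, 1, 8, 4, 8, 4), dtype=np.int32)
C = np.zeros((1, 1, 8, 8, 4, 4), dtype=np.int32)

for m in range(8):          # scf.for %arg2 (M-outer, parallel)
    for n in range(8):      # scf.for %arg3 (N-outer, parallel)
        for k in range(4):  # scf.for %arg4 (K-outer, reduction)
            # innermost linalg.generic: (4x8) @ (8x4) accumulated into 4x4
            C[0, 0, n, m] += A[0, 0, k, m] @ B[0, 0, n, k]

# Cross-check: the packed layouts are tiled views of plain 32x32 operands.
A2 = A.transpose(0, 1, 3, 4, 2, 5).reshape(32, 32)  # rows (M-outer,m), cols (K-outer,k)
B2 = B.transpose(0, 1, 3, 4, 2, 5).reshape(32, 32)  # rows (K-outer,k), cols (N-outer,n)
C2 = C.transpose(0, 1, 3, 4, 2, 5).reshape(32, 32)  # rows (M-outer,m), cols (N-outer,n)
assert (C2 == A2 @ B2).all()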
%47 = amdaie.dma_cpy_nd(%8[%10, %9] [%c64, %c64] [%c128, %c1], %2[%c0, %c0, %c0, %c0] [%c2, %c32, %c2, %c32] [%c2048, %c32, %c1024, %c1]) : (!amdaie.logicalobjectfifo<memref<128x128xi32>>, !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>)
} {mapping = [#gpu.block<y>, #gpu.block<x>]}
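// Note: each amdaie.dma_cpy_nd above pairs a destination and a source access
// pattern written as [offsets] [sizes] [strides] over a flat buffer. A toy Python
// model of the copy semantics follows, applied to the result write-back %47; the
// helper name and the %arg0 = %arg1 = 0 instance are illustrative assumptions,
// not the real runtime:

from itertools import product

def dma_cpy_nd(dst, dst_off, dst_sizes, dst_strides,
               src, src_off, src_sizes, src_strides):
    # Enumerate flat addresses of an n-d (offsets, sizes, strides) pattern
    # in lexicographic index order, then copy element by element.
    def addrs(off, sizes, strides):
        base = sum(o * s for o, s in zip(off, strides))
        for idx in product(*(range(n) for n in sizes)):
            yield base + sum(i * s for i, s in zip(idx, strides))
    for d, s in zip(addrs(dst_off, dst_sizes, dst_strides),
                    addrs(src_off, src_sizes, src_strides)):
        dst[d] = src[s]

# For %47: the 2x2x32x32 L2 tile (natural strides [2048, 1024, 32, 1]) is read
# with pattern [2, 32, 2, 32] / [2048, 32, 1024, 1], which interleaves the four
# 32x32 blocks into one contiguous 64x64 tile at row offset %10 = %arg0 * 64 and
# column offset %9 = %arg1 * 64 of the 128x128 output (both 0 here).
l2 = list(range(2 * 2 * 32 * 32))
out = [0] * (128 * 128)
dma_cpy_nd(out, [0, 0], [64, 64], [128, 1],
           l2, [0, 0, 0, 0], [2, 32, 2, 32], [2048, 32, 1024, 1])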
memref.dealloc %alloc_0 : memref<1x1x4x8x4x8xi32, 2 : i32>
memref.dealloc %alloc : memref<1x1x8x4x8x4xi32, 2 : i32>
memref.dealloc %alloc_4 : memref<1x1x8x8x4x4xi32, 2 : i32>
memref.dealloc %alloc_1 : memref<1x2x32x32xi32, 1 : i32>
memref.dealloc %alloc_2 : memref<2x1x32x32xi32, 1 : i32>
memref.dealloc %alloc_5 : memref<2x2x32x32xi32, 1 : i32>
return
}
}
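// Note: before the next pass runs, the logicalobjectfifo.produce/consume ops
// inside each core pair up with the dma_cpy_nd ops that fill and drain the
// core-local buffers. A minimal Python sketch of that handshake follows (a
// conceptual model only, with assumed method names; not the AMDAIE runtime API):

import queue

class LogicalObjectFifo:
    # Bounded queue of buffer slots shared between a DMA and a core.
    # With two slots this is the usual ping-pong double buffer.
    def __init__(self, buffers):
        self.filled = queue.Queue(maxsize=len(buffers))
        self.free = queue.Queue()
        for b in buffers:
            self.free.put(b)

    def acquire_for_write(self):   # producer side: grab an empty slot
        return self.free.get()

    def produce(self, buf):        # producer done: make the slot visible
        self.filled.put(buf)

    def consume(self):             # consumer side: block until data is ready
        return self.filled.get()

    def release(self, buf):        # consumer done: recycle the slot
        self.free.put(buf)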
// -----// IR Dump Before AMDAIECreateAIEWorkgroup (iree-amdaie-create-aie-workgroup) //----- //
func.func @matmul_i8_i32_dispatch_0_matmul_128x128x256_i32() attributes {translation_info = #iree_codegen.translation_info<Custom>} {
%c3 = arith.constant 3 : index
%c6 = arith.constant 6 : index
%c64 = arith.constant 64 : index
%c16 = arith.constant 16 : index
%c224 = arith.constant 224 : index
%c128 = arith.constant 128 : index
%c4096 = arith.constant 4096 : index
%c2048 = arith.constant 2048 : index
%c256 = arith.constant 256 : index
%c8192 = arith.constant 8192 : index
%c1024 = arith.constant 1024 : index
%c32 = arith.constant 32 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
%c8 = arith.constant 8 : index
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%alloc = memref.alloc() : memref<1x1x8x4x8x4xi32, 2 : i32>
%alloc_0 = memref.alloc() : memref<1x1x4x8x4x8xi32, 2 : i32>
%alloc_1 = memref.alloc() : memref<1x2x32x32xi32, 1 : i32>
%tile = amdaie.tile(%c0, %c1)
%0 = amdaie.logicalobjectfifo.from_memref %alloc_1, {%tile} : memref<1x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>
%alloc_2 = memref.alloc() : memref<2x1x32x32xi32, 1 : i32>
%tile_3 = amdaie.tile(%c1, %c1)
%1 = amdaie.logicalobjectfifo.from_memref %alloc_2, {%tile_3} : memref<2x1x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>
%alloc_4 = memref.alloc() : memref<1x1x8x8x4x4xi32, 2 : i32>
%alloc_5 = memref.alloc() : memref<2x2x32x32xi32, 1 : i32>
%2 = amdaie.logicalobjectfifo.from_memref %alloc_5, {%tile} : memref<2x2x32x32xi32, 1 : i32> -> !amdaie.logicalobjectfifo<memref<2x2x32x32xi32, 1 : i32>>
%3 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<128x256xi32>
%tile_6 = amdaie.tile(%c1, %c0)
%4 = amdaie.logicalobjectfifo.from_memref %3, {%tile_6} : memref<128x256xi32> -> !amdaie.logicalobjectfifo<memref<128x256xi32>>
memref.assume_alignment %3, 64 : memref<128x256xi32>
%5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : memref<256x128xi32>
%tile_7 = amdaie.tile(%c0, %c0)
%6 = amdaie.logicalobjectfifo.from_memref %5, {%tile_7} : memref<256x128xi32> -> !amdaie.logicalobjectfifo<memref<256x128xi32>>
memref.assume_alignment %5, 64 : memref<256x128xi32>
%7 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c0) : memref<128x128xi32>
%8 = amdaie.logicalobjectfifo.from_memref %7, {%tile_7} : memref<128x128xi32> -> !amdaie.logicalobjectfifo<memref<128x128xi32>>
memref.assume_alignment %7, 64 : memref<128x128xi32>
scf.forall (%arg0, %arg1) in (2, 2) {
%9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg1)
%10 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
%11 = amdaie.dma_cpy_nd(%1[%c0, %c0, %c0, %c0] [%c2, %c1, %c32, %c32] [%c1024, %c1024, %c32, %c1], %4[%c0, %c0, %10, %c0] [%c2, %c1, %c32, %c32] [%c8192, %c32, %c256, %c1]) : (!amdaie.logicalobjectfifo<memref<2x1x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<128x256xi32>>)
%12 = amdaie.dma_cpy_nd(%0[%c0, %c0, %c0, %c0] [%c1, %c2, %c32, %c32] [%c2048, %c1024, %c32, %c1], %6[%c0, %c0, %c0, %9] [%c1, %c2, %c32, %c32] [%c4096, %c32, %c128, %c1]) : (!amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>, !amdaie.logicalobjectfifo<memref<256x128xi32>>)
%tile_8 = amdaie.tile(%c1, %c3)
%13 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_8} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%tile_9 = amdaie.tile(%c0, %c3)
%14 = amdaie.logicalobjectfifo.from_memref %alloc, {%tile_9} : memref<1x1x8x4x8x4xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>
%15 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_8} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%tile_10 = amdaie.tile(%c1, %c2)
%16 = amdaie.logicalobjectfifo.from_memref %alloc_0, {%tile_10} : memref<1x1x4x8x4x8xi32, 2 : i32> -> !amdaie.logicalobjectfifo<memref<1x1x4x8x4x8xi32, 2 : i32>>
%17 = amdaie.circular_dma_cpy_nd(%14[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%18 = amdaie.circular_dma_cpy_nd(%13[%c0, %c0, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c1024, %c1024, %c128, %c32, %c4, %c1], %0[%c0, %c1, %c0, %c0, %c0, %c0] [%c1, %c1, %c8, %c4, %c8, %c4] [%c2048, %c1024, %c4, %c256, %c32, %c1]) : (!amdaie.logicalobjectfifo<memref<1x1x8x4x8x4xi32, 2 : i32>>, !amdaie.logicalobjectfifo<memref<1x2x32x32xi32, 1 : i32>>)
%19 = amdai