@Abhishek-Varma
Created October 18, 2023 17:54
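
IREE HAL executable IR for forward_dispatch_156: an f16 conv_2d_nchw_fchw (2x640x66x66 input, 640x640x3x3 filter, 2x640x64x64 output) with a fused elementwise add epilogue, compiled for the CUDA sm_89 target.
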
hal.executable public @forward_dispatch_156 {
  hal.executable.variant public @cuda_nvptx_fb, target = <"cuda", "cuda-nvptx-fb", {target_arch = "sm_89"}> {
    hal.executable.export public @forward_dispatch_156_conv_2d_nchw_fchw_2x640x64x64x640x3x3_f16 ordinal(0) layout(#hal.pipeline.layout<push_constants = 0, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer, ReadOnly>, <2, storage_buffer>]>]>) {
    ^bb0(%arg0: !hal.device):
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice
      hal.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @forward_dispatch_156_conv_2d_nchw_fchw_2x640x64x64x640x3x3_f16() {
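        // Constants: offsets into the bound storage buffers, plus the f16 zero used to initialize the conv accumulator.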
        %c99904320 = arith.constant 99904320 : index
        %c0 = arith.constant 0 : index
        %c752494080 = arith.constant 752494080 : index
        %c752492800 = arith.constant 752492800 : index
        %c750853120 = arith.constant 750853120 : index
        %c68447040 = arith.constant 68447040 : index
        %cst = arith.constant 0.000000e+00 : f16
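        // Bindings: conv input (2x640x66x66), filter (640x640x3x3), two 640-element vectors, a 2x640 tensor, and the 2x640x64x64 result buffer.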
        %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c99904320) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<2x640x66x66xf16>>
        %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c752494080) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<640x640x3x3xf16>>
        %2 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c752492800) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<640xf16>>
        %3 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%c750853120) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<640xf16>>
        %4 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<2x640xf16>>
        %5 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) alignment(64) offset(%c68447040) : !flow.dispatch.tensor<writeonly:tensor<2x640x64x64xf16>>
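        // Load the full tensors from the dispatch bindings.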
        %6 = flow.dispatch.tensor.load %0, offsets = [0, 0, 0, 0], sizes = [2, 640, 66, 66], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<2x640x66x66xf16>> -> tensor<2x640x66x66xf16>
        %7 = flow.dispatch.tensor.load %1, offsets = [0, 0, 0, 0], sizes = [640, 640, 3, 3], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<640x640x3x3xf16>> -> tensor<640x640x3x3xf16>
        %8 = flow.dispatch.tensor.load %2, offsets = [0], sizes = [640], strides = [1] : !flow.dispatch.tensor<readonly:tensor<640xf16>> -> tensor<640xf16>
        %9 = flow.dispatch.tensor.load %3, offsets = [0], sizes = [640], strides = [1] : !flow.dispatch.tensor<readonly:tensor<640xf16>> -> tensor<640xf16>
        %10 = flow.dispatch.tensor.load %4, offsets = [0, 0], sizes = [2, 640], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<2x640xf16>> -> tensor<2x640xf16>
        %11 = tensor.empty() : tensor<2x640x64x64xf16>
        %12 = linalg.fill ins(%cst : f16) outs(%11 : tensor<2x640x64x64xf16>) -> tensor<2x640x64x64xf16>
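        // 3x3 NCHW/FCHW convolution, stride 1, dilation 1: 2x640x66x66 input -> 2x640x64x64 output, accumulating into the zero-filled tensor.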
        %13 = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%6, %7 : tensor<2x640x66x66xf16>, tensor<640x640x3x3xf16>) outs(%12 : tensor<2x640x64x64xf16>) -> tensor<2x640x64x64xf16>
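        // Fused elementwise epilogue: result = (conv + %8[c]) + (%9[c] + %10[n, c]), broadcast over the spatial dims.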
        %14 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d1)>, affine_map<(d0, d1, d2, d3) -> (d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%13, %8, %9, %10 : tensor<2x640x64x64xf16>, tensor<640xf16>, tensor<640xf16>, tensor<2x640xf16>) outs(%11 : tensor<2x640x64x64xf16>) {
        ^bb0(%in: f16, %in_0: f16, %in_1: f16, %in_2: f16, %out: f16):
          %15 = arith.addf %in_1, %in_2 : f16
          %16 = arith.addf %in, %in_0 : f16
          %17 = arith.addf %16, %15 : f16
          linalg.yield %17 : f16
        } -> tensor<2x640x64x64xf16>
        flow.dispatch.tensor.store %14, %5, offsets = [0, 0, 0, 0], sizes = [2, 640, 64, 64], strides = [1, 1, 1, 1] : tensor<2x640x64x64xf16> -> !flow.dispatch.tensor<writeonly:tensor<2x640x64x64xf16>>
        return
      }
    }
  }
}
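
For reference, below is a minimal NumPy sketch (not part of the gist) of the math this dispatch performs, assuming the bound tensors map to the loads above: a stride-1 3x3 conv_2d_nchw_fchw over the 2x640x66x66 f16 input, followed by the fused elementwise adds from the linalg.generic. The function name and the f32 accumulator are illustrative choices; the IR itself accumulates in f16.

import numpy as np

def forward_dispatch_156_reference(x, w, b0, b1, b2):
    """x:  (2, 640, 66, 66) f16 conv input
       w:  (640, 640, 3, 3) f16 filter (FCHW)
       b0: (640,)           f16, maps to %8, broadcast over (n, h, w)
       b1: (640,)           f16, maps to %9, broadcast over (n, h, w)
       b2: (2, 640)         f16, maps to %10, broadcast over (h, w)"""
    n, cin, hin, win = x.shape
    f, _, kh, kw = w.shape
    hout, wout = hin - kh + 1, win - kw + 1  # 64 x 64 for this dispatch
    # Naive stride-1 convolution: accumulate one (kh, kw) filter tap at a time.
    acc = np.zeros((n, f, hout, wout), dtype=np.float32)
    for i in range(kh):
        for j in range(kw):
            patch = x[:, :, i:i + hout, j:j + wout].astype(np.float32)  # (n, cin, hout, wout)
            tap = w[:, :, i, j].astype(np.float32)                      # (f, cin)
            acc += np.einsum("nchw,fc->nfhw", patch, tap)
    conv = acc.astype(np.float16)
    # Fused epilogue, matching the add order in the linalg.generic body.
    out = (conv + b0[None, :, None, None]) + (b1[None, :, None, None] + b2[:, :, None, None])
    return out.astype(np.float16)

The einsum contracts over the input-channel dimension for each of the nine filter taps, which is equivalent to the conv_2d_nchw_fchw contraction; the 66x66 input already carries the one-pixel border needed for a 64x64 output, so no extra padding is applied here.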