module {
  func.func @unpack(%arg0: tensor<2x2x32x32xf32>, %arg1: tensor<64x64xf32>) -> tensor<64x64xf32> {
    %unpack = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [32, 32] into %arg1 : tensor<2x2x32x32xf32> -> tensor<64x64xf32>
    return %unpack : tensor<64x64xf32>
  }
  transform.sequence failures(propagate) {
  ^bb0(%arg0: !transform.any_op):
    %0 = transform.structured.match ops{["tensor.unpack"]} in %arg0 : (!transform.any_op) -> !transform.op<"tensor.unpack">
    %empty_op, %transpose_op, %collapse_shape_op, %extract_slice_op = transform.structured.lower_unpack %0 : (!transform.op<"tensor.unpack">) -> (!transform.op<"tensor.empty">, !transform.op<"linalg.transpose">, !transform.op<"tensor.collapse_shape">, !transform.op<"tensor.extract_slice">)
  }
}
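For reference, a sketch of roughly what this lowering should produce for @unpack: the tensor.empty / linalg.transpose / tensor.collapse_shape / tensor.extract_slice chain named by the handles above. The function name, the staging shape, and the permutation below are my reconstruction, not verified compiler output.

// Sketch (assumed result of lower_unpack, not verified output).
func.func @unpack_lowered(%arg0: tensor<2x2x32x32xf32>, %arg1: tensor<64x64xf32>) -> tensor<64x64xf32> {
  // Staging tensor with each 32x32 tile placed next to its outer dim.
  %empty = tensor.empty() : tensor<2x32x2x32xf32>
  // (o0, o1, i0, i1) -> (o0, i0, o1, i1)
  %transposed = linalg.transpose ins(%arg0 : tensor<2x2x32x32xf32>) outs(%empty : tensor<2x32x2x32xf32>) permutation = [0, 2, 1, 3]
  // Merge each (outer, inner) pair into one 64-wide dimension.
  %collapsed = tensor.collapse_shape %transposed [[0, 1], [2, 3]] : tensor<2x32x2x32xf32> into tensor<64x64xf32>
  // No padding to strip in this example, so the slice covers the whole result.
  %slice = tensor.extract_slice %collapsed[0, 0] [64, 64] [1, 1] : tensor<64x64xf32> to tensor<64x64xf32>
  return %slice : tensor<64x64xf32>
}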
// BEFORE
#map = affine_map<(d0, d1) -> (d1)>
#map1 = affine_map<(d0, d1) -> (d0, d1)>
module attributes {torch.debug_module_name = "MLP"} {
  func.func @MLP(%arg0: tensor<128x262144xf32>, %arg1: tensor<128xf32>, %arg2: tensor<64x512x512xf32>) -> tensor<64x128xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %collapsed = tensor.collapse_shape %arg2 [[0], [1, 2]] : tensor<64x512x512xf32> into tensor<64x262144xf32>
    %0 = tensor.empty() : tensor<262144x128xf32>
    %transposed = linalg.transpose ins(%arg0 : tensor<128x262144xf32>) outs(%0 : tensor<262144x128xf32>) permutation = [1, 0]
// -one-shot-bufferize="bufferize-function-boundaries function-boundary-type-conversion=identity-layout-map" -expand-realloc -canonicalize -ownership-based-buffer-deallocation -canonicalize
module {
  func.func @unpack2(%arg0: tensor<1x2x2x2x2xf32>, %arg1: tensor<1x2x2x4xf32>) -> tensor<1x2x2x4xf32> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2 : index
    %0 = scf.for %arg2 = %c0 to %c1 step %c1 iter_args(%arg3 = %arg1) -> (tensor<1x2x2x4xf32>) {
build/bin# cat hello.c
#include <stdio.h>

int main() {
    printf("Hello, World!");
    return 0;
}
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
#map2 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3, d4)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map4 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3)>
#map5 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d4, d2, d3)>
#map6 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d2, d4, d1)>
#map7 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
#map8 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, 0)>
#map9 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d3, d1, d4)>
// tf-opt self-attention-stable-hlo.mlir -tf-lower-to-mlprogram-and-hlo -stablehlo-legalize-to-hlo -hlo-legalize-to-linalg -canonicalize -cse
#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
#map2 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3, d4)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map4 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3)>
#map5 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d4, d2, d3)>
#map6 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d2, d4, d1)>
#map7 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
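As an aside, #map, #map1 and #map2 above have the shape of a contraction over d2. The sketch below is only an illustration of how such maps are typically consumed by a linalg.generic; the function name is hypothetical, the operand shapes (64x32x512 and 512x8x64) are borrowed from the @main signatures elsewhere on this page, and this is not the gist's actual lowered IR.

#map = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
#map2 = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3, d4)>
// Illustrative only: a projection-style contraction (e.g. query x weight) over d2.
func.func @contract_example(%lhs: tensor<64x32x512xf32>, %rhs: tensor<512x8x64xf32>) -> tensor<64x32x8x64xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %init = tensor.empty() : tensor<64x32x8x64xf32>
  %fill = linalg.fill ins(%cst : f32) outs(%init : tensor<64x32x8x64xf32>) -> tensor<64x32x8x64xf32>
  %res = linalg.generic {indexing_maps = [#map, #map1, #map2],
                         iterator_types = ["parallel", "parallel", "reduction", "parallel", "parallel"]}
      ins(%lhs, %rhs : tensor<64x32x512xf32>, tensor<512x8x64xf32>)
      outs(%fill : tensor<64x32x8x64xf32>) {
  ^bb0(%a: f32, %b: f32, %acc: f32):
    // Accumulate over d2 (the 512-wide dimension): acc += a * b.
    %m = arith.mulf %a, %b : f32
    %s = arith.addf %acc, %m : f32
    linalg.yield %s : f32
  } -> tensor<64x32x8x64xf32>
  return %res : tensor<64x32x8x64xf32>
}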
module {
func.func @main(%arg0: tensor<64x32x512xf32>, %arg1: tensor<64x32x512xf32>, %arg2: tensor<64x32x512xf32>) -> tensor<64x32x512xf32> {
%0 = stablehlo.constant dense<1.250000e-01> : tensor<64x32x8x64xf32>
%1 = stablehlo.constant dense<-0.000000e+00> : tensor<f32>
%2 = stablehlo.constant dense<0xFF800000> : tensor<f32>
%3 = stablehlo.constant dense<"0x189F30BC32360EBCC00C7DB954882E3B5EA0473CFA3F0FBCA0BB603B4B28C7BB487A01BC682B403B3A3589BB20F50ABACCADE0BB8CE11BBCE6D70D3CA8DBF1BAEAFD3B3CA8A832BCB8D40C3B9062E03A6ECF1B3C00A4213BD489A6BBB4D0283CA8953E3BD7F324BC9E72443C50049B3B008725BC1A3527BC9C3308BC98CD773BAAF7A9BB3067803BA04A0D3C74CD113CF8269F3A701F4D3C876217BC80EDC13B3860453B3882E7BAC8FA313CC08515BABCD9353BFAAA493CCC7566BB1733FDBB983A323C307E653A98CAC93B5D3BE8BB38B3043C76962B3C9F02B0BB5B5321BC7224093C347DC83B58287FBB106D173C6A5E4FBC00B8ABBB280EEE3BA0C2DEBB50BCF23B0857C03AC63FC5BB60E39E39578943BCA65001BC105C9B3B20792D3C642E46BB4C7953BBE035F13BFFA4E0BBFF0939BC5099163AE8144ABCA2F7163C9C3981B |
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1395 : i32}} {
func.func @main(%arg0: tensor<64x32x512xf32>, %arg1: tensor<64x32x512xf32>, %arg2: tensor<64x32x512xf32>) -> tensor<64x32x512xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "query,value,key", outputs = "Identity:0"}} {
%cst = "tf.Const"() {device = "", value = dense<"0xCC2A59BB5B6B49BC0B420FBCF2F73B3C80CD0FB9C0CC853A3C0B2A3C725039BC929D403CF082393AF4BFF93BD7D239BC5E0D06BC5827A1BA40422ABC7C73FE3BF093EB3B3AB0083C25D627BCE081103BD439BD3BC874E23A008FB2BB7874CA3B681A9A3BFC13F23B3C01CEBB2719E6BB00475CB8A8749FBAC0F80CBA64D70E3C88CB01BBC42088BB287CC13AF4DC0C3CEDB0D1BB165C9ABB58D2063C4813B83B7C81B9BB1EF622BCFDAA2ABC7877923B60149D3BFB84A6BB4E549ABB5CC1993B48FD4B3B1465493C0088413BA0A7F2BBF45655BB48C396BB706F2D3A318942BC60E7C03BE29A07BC40CCD6B924ACFBBB63C6DABBBEF71C3CB0AB673A3832973A7E071ABC283A433B6877453B295FD7BB430243BC70F060BA9078093CE0A188BB5457A73BEA004B3C002952B819DA0CBC083F2B3CE84033 |
// Self attention key = value = query
// value_dim = key_dim
%0 = "tf.Identity"(%cst_3) {_has_manual_control_dependencies = true, device = ""} : (tensor<8x64x512xf32>) -> tensor<8x64x512xf32>
%1 = "tf.Identity"(%cst_1) {_has_manual_control_dependencies = true, device = ""} : (tensor<512x8x64xf32>) -> tensor<512x8x64xf32>
%2 = "tf.Identity"(%cst_0) {_has_manual_control_dependencies = true, device = ""} : (tensor<512x8x64xf32>) -> tensor<512x8x64xf32>
%3 = "tf.Identity"(%cst) {_has_manual_control_dependencies = true, device = ""} : (tensor<512x8x64xf32>) -> tensor<512x8x64xf32>