#map = affine_map<(d0, d1) -> (d0, d1)>
func.func @linalg_transpose_2d(%arg0: tensor<?x?xf32>, %permutation: tensor<2xindex>, %1 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<?x?xf32>) outs(%1 : tensor<?x?xf32>) {
  ^bb0(%in: f32, %out: f32):
    // Current output coordinates.
    %idx0 = linalg.index 0 : index
    %idx1 = linalg.index 1 : index
    %indexes = tensor.from_elements %idx0, %idx1 : tensor<2xindex>
    %cst0 = arith.constant 0 : index
    %cst1 = arith.constant 1 : index
    // Map each output dimension through the runtime permutation, then
    // gather the input element at the permuted coordinates.
    %permutationIdx0 = tensor.extract %permutation[%cst0] : tensor<2xindex>
    %permutationIdx1 = tensor.extract %permutation[%cst1] : tensor<2xindex>
    %inIdx0 = tensor.extract %indexes[%permutationIdx0] : tensor<2xindex>
    %inIdx1 = tensor.extract %indexes[%permutationIdx1] : tensor<2xindex>
    %val = tensor.extract %arg0[%inIdx0, %inIdx1] : tensor<?x?xf32>
    linalg.yield %val : f32
  } -> tensor<?x?xf32>
  return %2 : tensor<?x?xf32>
}
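The same gather-by-permutation kernel, this time on i32 elements and with the permutation operand named %dims: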
#map = affine_map<(d0, d1) -> (d0, d1)>
func.func @linalg_transpose_2d(%arg0: tensor<?x?xi32>, %dims: tensor<2xindex>, %1 : tensor<?x?xi32>) -> tensor<?x?xi32> {
  %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<?x?xi32>) outs(%1 : tensor<?x?xi32>) {
  ^bb0(%in: i32, %out: i32):
    %idx0 = linalg.index 0 : index
    %idx1 = linalg.index 1 : index
    %indexes = tensor.from_elements %idx0, %idx1 : tensor<2xindex>
    %cst0 = arith.constant 0 : index
    %cst1 = arith.constant 1 : index
    %dim0 = tensor.extract %dims[%cst0] : tensor<2xindex>  // same pattern as the f32 kernel
    %dim1 = tensor.extract %dims[%cst1] : tensor<2xindex>
    %inIdx0 = tensor.extract %indexes[%dim0] : tensor<2xindex>
    %inIdx1 = tensor.extract %indexes[%dim1] : tensor<2xindex>
    %val = tensor.extract %arg0[%inIdx0, %inIdx1] : tensor<?x?xi32>
    linalg.yield %val : i32
  } -> tensor<?x?xi32>
  return %2 : tensor<?x?xi32>
}
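Both kernels gather through an index tensor because the permutation is a runtime value. When the permutation is known statically, the transpose needs no per-element index arithmetic at all; a minimal sketch (function name hypothetical), assuming an MLIR build recent enough to have the linalg.transpose named op:

// Static permutation: [1, 0] swaps the two dimensions directly.
func.func @static_transpose_2d(%arg0: tensor<?x?xf32>, %init: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %t = linalg.transpose ins(%arg0 : tensor<?x?xf32>)
                        outs(%init : tensor<?x?xf32>)
                        permutation = [1, 0]
  return %t : tensor<?x?xf32>
}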
func.func @mega_lib_pad_positive_3d_i32(%arg0: tensor<?x?x?xi32>, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: i32) -> tensor<?x?x?xi32> {
  %padded = tensor.pad %arg0 low[%arg1, %arg2, %arg3] high[%arg4, %arg5, %arg6] {
  ^bb0(%arg8: index, %arg9: index, %arg10: index):
    tensor.yield %arg7 : i32
  } : tensor<?x?x?xi32> to tensor<?x?x?xi32>
  return %padded : tensor<?x?x?xi32>
}
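tensor.pad grows each dimension by the corresponding low and high amounts, so the result size along dimension d is low[d] + size[d] + high[d]. A static 2-D sketch (hypothetical shapes, name, and padding values) that makes the arithmetic visible:

// Result dims: 1 + 4 + 3 = 8 and 2 + 5 + 4 = 11.
func.func @pad_static_example(%arg0: tensor<4x5xf32>) -> tensor<8x11xf32> {
  %cst = arith.constant 0.0 : f32
  %padded = tensor.pad %arg0 low[1, 2] high[3, 4] {
  ^bb0(%i: index, %j: index):
    tensor.yield %cst : f32
  } : tensor<4x5xf32> to tensor<8x11xf32>
  return %padded : tensor<8x11xf32>
}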
func.func @mega_lib_pad_positive_2d_f32(%arg0: tensor<?x?xf32>, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: f32) -> tensor<?x?xf32> {
  %padded = tensor.pad %arg0 low[%arg1, %arg2] high[%arg3, %arg4] {
  ^bb0(%arg6: index, %arg7: index):
    tensor.yield %arg5 : f32
  } : tensor<?x?xf32> to tensor<?x?xf32>
  return %padded : tensor<?x?xf32>
}
module {
  func.func @pad_mega_lib_3d_i32(%arg0: tensor<?x?x?xi32>, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: i32) -> tensor<?x?x?xi32> {
    %padded = tensor.pad %arg0 low[%arg4, %arg5, %arg6] high[%arg1, %arg2, %arg3] {
    ^bb0(%arg8: index, %arg9: index, %arg10: index):
      tensor.yield %arg7 : i32
    } : tensor<?x?x?xi32> to tensor<?x?x?xi32>
    return %padded : tensor<?x?x?xi32>
  }
  func.func @pad_mega_lib_2d_f32(%arg0: tensor<?x?xf32>, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: f32) -> tensor<?x?xf32> {
    %padded = tensor.pad %arg0 low[%arg3, %arg4] high[%arg1, %arg2] {
    ^bb0(%arg6: index, %arg7: index):
      tensor.yield %arg5 : f32
    } : tensor<?x?xf32> to tensor<?x?xf32>
    return %padded : tensor<?x?xf32>
  }
}
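(The pad_mega_lib_* pair mirrors the mega_lib_pad_positive_* pair above: identical signatures, but with the low and high operand groups swapped.)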
func.func @tensor_pad_2d_f32(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = arith.constant dense<0.0> : tensor<f32>
  %1 = "stablehlo.pad"(%arg0, %0) {
    edge_padding_high = array<i64: 2, 3>,
    edge_padding_low = array<i64: 4, 5>,
    interior_padding = array<i64: 2, 3>
  } : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}
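Unlike tensor.pad, stablehlo.pad also inserts interior padding between adjacent elements: for an input dimension of size s, the output size is edge_padding_low + s + edge_padding_high + max(s - 1, 0) * interior_padding. A static sketch (hypothetical shapes and name) of the op above:

// dim 0: 4 + 2 + 2 + (2 - 1) * 2 = 10; dim 1: 5 + 3 + 3 + (3 - 1) * 3 = 17.
func.func @stablehlo_pad_static(%arg0: tensor<2x3xf32>) -> tensor<10x17xf32> {
  %zero = arith.constant dense<0.0> : tensor<f32>
  %0 = "stablehlo.pad"(%arg0, %zero) {
    edge_padding_high = array<i64: 2, 3>,
    edge_padding_low = array<i64: 4, 5>,
    interior_padding = array<i64: 2, 3>
  } : (tensor<2x3xf32>, tensor<f32>) -> tensor<10x17xf32>
  return %0 : tensor<10x17xf32>
}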
graph(%self : Tensor,
      %lhs.1 : Tensor,
      %rhs.1 : Tensor):
  %5 : float = aten::Float(%lhs.1) # /usr/local/google/home/cathyzhyi/items/test/test.py:5:8
  %8 : float = aten::Float(%rhs.1) # /usr/local/google/home/cathyzhyi/items/test/test.py:5:21
  %sub.1 : float = aten::sub(%5, %8) # /usr/local/google/home/cathyzhyi/items/test/test.py:5:8
  %12 : int = aten::ceil(%sub.1) # /usr/local/google/home/cathyzhyi/items/test/test.py:6:9
  return (%12)
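Imported through torch-mlir, the graph's scalar ops map one-to-one into the torch dialect below: aten::Float becomes torch.aten.Float.Tensor, aten::sub becomes torch.aten.sub.float, and aten::ceil becomes torch.aten.ceil.float, with the #loc attributes preserving the test.py source positions.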
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "CeilFloatModule"} {
  func private @__torch__.torch_mlir_e2e_test.test_suite.scalar.CeilFloatModule.forward(%arg0: !torch.nn.Module<"__torch__.torch_mlir_e2e_test.test_suite.scalar.CeilFloatModule"> loc(unknown), %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[],f64>} loc(unknown), %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[],f64>} loc(unknown)) -> !torch.int {
    %1 = torch.aten.Float.Tensor %arg1 : !torch.tensor -> !torch.float loc(#loc1)
    %2 = torch.aten.Float.Tensor %arg2 : !torch.tensor -> !torch.float loc(#loc2)
    %3 = torch.aten.sub.float %1, %2 : !torch.float, !torch.float -> !torch.float loc(#loc1)
    %4 = torch.aten.ceil.float %3 : !torch.float -> !torch.int loc(#loc3)
    return %4 : !torch.int loc(#loc0)
  } loc(#loc0)
  torch.class_type @__torch__.torch_mlir_e2e_test.test_suite.scalar.CeilFloatModule {
// ... (file truncated)
ninja: Entering directory `/usr/local/google/home/cathyzhyi/items/test/torch-mlir/build'
ninja: no work to do.
Args: /usr/local/google/home/cathyzhyi/items/test/torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=torchscript-module-to-torch-backend-pipeline -mlir-print-ir-after-all -debug forward.mlir
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
... (log truncated)
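(The ImplicitTypeIDRegistry::lookupOrInsert lines come from the -debug flag in the Args line above; the per-pass IR snapshots requested by -mlir-print-ir-after-all follow in the truncated portion.)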
Requirement already satisfied: transformers[torch] in ./generate-tests/venv/lib/python3.9/site-packages (4.18.0)
Requirement already satisfied: sacremoses in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (0.0.49)
Requirement already satisfied: packaging>=20.0 in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (21.3)
Requirement already satisfied: pyyaml>=5.1 in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (6.0)
Requirement already satisfied: filelock in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (3.6.0)
Requirement already satisfied: requests in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (2.27.1)
Requirement already satisfied: numpy>=1.17 in ./generate-tests/venv/lib/python3.9/site-packages (from transformers[torch]) (1.22.3)
Requirement already satisfied: huggingface-hub<1.0,>=0.1.0 in ./generate-tests/venv/lib/python3.9/site-packages (from transforme
// -----// IR Dump After FuncBufferize //----- //
module {
  func @collapse_dynamic_shape_of_slice(%arg0: memref<?x?x?x?xf32>, %arg1: index, %arg2: index, %arg3: index) -> memref<2x?x?xf32> {
    %0 = bufferization.to_tensor %arg0 : memref<?x?x?x?xf32>
    %1 = tensor.extract_slice %0[0, 0, %arg1, %arg1] [%arg2, %arg2, %arg3, %arg3] [1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
    %2 = tensor.cast %1 : tensor<?x?x?x?xf32> to tensor<2x?x?x?xf32>
    %3 = tensor.collapse_shape %2 [[0], [1, 2], [3]] : tensor<2x?x?x?xf32> into tensor<2x?x?xf32>
    %4 = bufferization.to_memref %3 : memref<2x?x?xf32>
    return %4 : memref<2x?x?xf32>
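FuncBufferize only rewrites function boundaries (signatures, returns, and calls) to memref types; ops in the body still operate on tensors, which is why the dump round-trips through bufferization.to_tensor and bufferization.to_memref. Later bufferization passes fold these materializations away.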