-
-
Save vivekkhandelwal1/c581d7c2a09b14f19519d3d6c10f7004 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
module {
  // Minimal repro tracing an ONNX dynamic-Pad decomposition: a pads vector is
  // built from constants + a runtime-shaped ConstantOfShape, reshaped to 4x2,
  // reverse-sliced, transposed, flattened, and fed to onnx.Pad before a Conv.
  // NOTE(review): several intermediate values depend on the runtime value of
  // %arg1; annotations below state the decoded constants as facts and hedge
  // anything %arg1-dependent.
  func.func @torch_jit(%arg0: !torch.vtensor<[1,3,240,240],f32>, %arg1: !torch.vtensor<[1],si64> , %arg2:!torch.vtensor<[32,3,3,3],f32>, %arg3: !torch.vtensor<[32],f32>) -> !torch.vtensor<[?,32,?,?],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
    %233 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_onnx__Concat_1461> : tensor<4xsi64>} : () -> !torch.vtensor<[4],si64>
    // %233 = [0, 1, 0, 1]  (decoded from resource _onnx__Concat_1461)
    %238 = torch.operator "onnx.ConstantOfShape"(%arg1) {torch.onnx.value = dense_resource<_> : tensor<1xsi64>} : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[4],si64>
    // %238 = zeros of shape %arg1; fill value (resource _) is 0. The result
    // type pins it to 4 elements, so assuming %arg1 == [4]: %238 = [0, 0, 0, 0]
    %239 = torch.operator "onnx.Concat"(%233, %238) {torch.onnx.axis = 0 : si64} : (!torch.vtensor<[4],si64>, !torch.vtensor<[4],si64>) -> !torch.vtensor<[8],si64>
    // %239 = concat(%233, %238) = [0, 1, 0, 1, 0, 0, 0, 0]
    %240 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__1> : tensor<2xsi64>} : () -> !torch.vtensor<[2],si64>
    // %240 = [-1, 2]  (resource __1); Reshape infers the -1 dim as 4
    %241 = torch.operator "onnx.Reshape"(%239, %240) : (!torch.vtensor<[8],si64>, !torch.vtensor<[2],si64>) -> !torch.vtensor<[4,2],si64>
    // %241 = [[0, 1],
    //         [0, 1],
    //         [0, 0],
    //         [0, 0]]
    %242 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__2> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
    // %242 = [0]
    %243 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__3> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
    // %243 = [-1]
    %244 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__4> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
    // %244 = [-9223372036854775807] (0x8000000000000001, the ONNX
    // "slice to the very beginning" end sentinel for negative steps).
    // Unused below: the Slice takes %arg1 as its ends operand instead.
    %245 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__5> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
    // %245 = [-1]
    %246 = torch.operator "onnx.Slice"(%241, %243, %arg1, %242, %245) : (!torch.vtensor<[4,2],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[4,2],si64>
    // Slice(data=%241, starts=[-1], ends=%arg1, axes=[0], steps=[-1]):
    // walks the rows backwards. NOTE(review): ends is the RUNTIME value
    // %arg1; the full row-reversal below only happens when %arg1 reaches
    // past the first row (e.g. the INT64_MIN-style sentinel) — confirm.
    // %246 = [[0, 0],
    //         [0, 0],
    //         [0, 1],
    //         [0, 1]]
    %247 = torch.operator "onnx.Transpose"(%246) {torch.onnx.perm = [1 : si64, 0 : si64]} : (!torch.vtensor<[4,2],si64>) -> !torch.vtensor<[2,4],si64>
    // Transpose(%246, perm=[1, 0]): 4x2 -> 2x4
    // %247 = [[0, 0, 0, 0],
    //         [0, 0, 1, 1]]
    %248 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__6> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
    // %248 = [-1]
    %249 = torch.operator "onnx.Reshape"(%247, %248) : (!torch.vtensor<[2,4],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[8],si64>
    // Flatten 2x4 -> 8 (row-major):
    // %249 = [0, 0, 0, 0, 0, 0, 1, 1]
    %250 = torch.operator "onnx.Cast"(%249) {torch.onnx.to = 7 : si64} : (!torch.vtensor<[8],si64>) -> !torch.vtensor<[8],si64>
    // Cast to int64 (to = 7); already si64, so a no-op on the values.
    %251 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<__7> : tensor<f32>} : () -> !torch.vtensor<[],f32>
    // %251 = 0.0 (scalar f32 pad value)
    %252 = torch.operator "onnx.Pad"(%arg0, %250, %251) {torch.onnx.mode = "constant"} : (!torch.vtensor<[1,3,240,240],f32>, !torch.vtensor<[8],si64>, !torch.vtensor<[],f32>) -> !torch.vtensor<[?,?,?,?],f32>
    // ONNX pads layout is [x1_begin, ..., x4_begin, x1_end, ..., x4_end], so
    // pads %249 = [0,0,0,0 | 0,0,1,1] gives output 1x3x241x241 under the
    // %arg1 assumption above. NOTE(review): the gist's original annotation
    // claimed 2x4x240x240, which would require pads [0,0,0,0,1,1,0,0] —
    // i.e. the pre-transpose trace with the second row as [1,1,0,0]; this
    // discrepancy is likely the bug the repro is demonstrating — confirm.
    %253 = torch.operator "onnx.Conv"(%252, %arg2, %arg3) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [3 : si64, 3 : si64], torch.onnx.pads = [0 : si64, 0 : si64, 0 : si64, 0 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[32,3,3,3],f32>, !torch.vtensor<[32],f32>) -> !torch.vtensor<[?,32,?,?],f32>
    return %253 : !torch.vtensor<[?,32,?,?],f32>
  }
}
// Each blob below is a little-endian byte string with an 8-byte alignment
// header (0x08000000) followed by the element data.
{-#
  dialect_resources: {
    builtin: {
      _onnx__Concat_1461: "0x080000000000000000000000010000000000000000000000000000000100000000000000",
      _: "0x080000000000000000000000",
      __1: "0x08000000FFFFFFFFFFFFFFFF0200000000000000",
      __2: "0x080000000000000000000000",
      __3: "0x08000000FFFFFFFFFFFFFFFF",
      __4: "0x080000000100000000000080",
      __5: "0x08000000FFFFFFFFFFFFFFFF",
      __6: "0x08000000FFFFFFFFFFFFFFFF",
      __7: "0x0800000000000000"
    }
  }
#-}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment