Skip to content

Instantly share code, notes, and snippets.

@masahi
Created February 12, 2021 02:36
Show Gist options
  • Save masahi/231a19bbe072ec7b0b574843af244c44 to your computer and use it in GitHub Desktop.
/* Opaque storage type with no constructors: values of Storage[] are produced
   only by the runtime intrinsic memory.alloc_storage (see the %storage_* lets
   below) and consumed by memory.alloc_tensor, which carves typed tensors out
   of a storage at a given byte offset. Declared empty because storage objects
   are managed entirely by the VM, not pattern-matched in the IR. */
type Storage {
}
def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
let %storage_0: Storage[] = memory.alloc_storage(602112 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][0]) /* ty=Storage[] */;
let %tensor_0: Tensor[(1, 224, 224, 3), float32] = memory.alloc_tensor(%storage_0, 0 /* ty=int64 */, meta[relay.Constant][0] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][0]) /* ty=Tensor[(1, 224, 224, 3), float32] */;
%2 = fn (%p0: Tensor[(1, 3, 224, 224), float32], %p1: Tensor[(3, 1, 1), float32], %p2: Tensor[(3, 1, 1), float32], Primitive=1) -> Tensor[(1, 224, 224, 3), float32] {
%0 = multiply(%p0, %p1) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%1 = add(%0, %p2) /* ty=Tensor[(1, 3, 224, 224), float32] */;
layout_transform(%1, src_layout="NCHW", dst_layout="NHWC") /* ty=Tensor[(1, 224, 224, 3), float32] */
};
%3 = (%data, meta[relay.Constant][1] /* ty=Tensor[(3, 1, 1), float32] */, meta[relay.Constant][2] /* ty=Tensor[(3, 1, 1), float32] */);
%4 = (%tensor_0,);
let %x: () = vm.invoke_tvm_op(%2, %3, %4) /* ty=() */;
let %x1: Tensor[(1, 224, 224, 3), float32] = %tensor_0;
let %storage_01: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][1]) /* ty=Storage[] */;
let %tensor_01: Tensor[(1, 112, 112, 64), float32] = memory.alloc_tensor(%storage_01, 0 /* ty=int64 */, meta[relay.Constant][3] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][1]) /* ty=Tensor[(1, 112, 112, 64), float32] */;
%7 = fn (%p01: Tensor[(1, 224, 224, 3), float32], %p11: Tensor[(7, 7, 3, 64), float32], %p21: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 112, 112, 64), float32] {
%5 = nn.conv2d(%p01, %p11, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 112, 112, 64), float32] */;
%6 = add(%5, %p21) /* ty=Tensor[(1, 112, 112, 64), float32] */;
nn.relu(%6) /* ty=Tensor[(1, 112, 112, 64), float32] */
};
%8 = (%x1, meta[relay.Constant][4] /* ty=Tensor[(7, 7, 3, 64), float32] */, meta[relay.Constant][5] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%9 = (%tensor_01,);
let %x2: () = vm.invoke_tvm_op(%7, %8, %9) /* ty=() */;
let %x3: Tensor[(1, 112, 112, 64), float32] = %tensor_01;
let %storage_02: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][2]) /* ty=Storage[] */;
let %tensor_02: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_02, 0 /* ty=int64 */, meta[relay.Constant][6] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][2]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%12 = fn (%p02: Tensor[(1, 112, 112, 64), float32], %p12: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%10 = nn.max_pool2d(%p02, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1], layout="NHWC") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%11 = add(%10, %p12) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%11) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%13 = (%x3, meta[relay.Constant][7] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%14 = (%tensor_02,);
let %x4: () = vm.invoke_tvm_op(%12, %13, %14) /* ty=() */;
let %x5: Tensor[(1, 56, 56, 64), float32] = %tensor_02;
let %storage_03: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][3]) /* ty=Storage[] */;
let %tensor_03: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_03, 0 /* ty=int64 */, meta[relay.Constant][8] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][3]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%17 = fn (%p03: Tensor[(1, 56, 56, 64), float32], %p13: Tensor[(1, 1, 64, 64), float32], %p22: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%15 = nn.conv2d(%p03, %p13, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%16 = add(%15, %p22) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%16) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%18 = (%x5, meta[relay.Constant][9] /* ty=Tensor[(1, 1, 64, 64), float32] */, meta[relay.Constant][10] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%19 = (%tensor_03,);
let %x6: () = vm.invoke_tvm_op(%17, %18, %19) /* ty=() */;
let %x7: Tensor[(1, 56, 56, 64), float32] = %tensor_03;
let %storage_04: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][4]) /* ty=Storage[] */;
let %tensor_04: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_04, 0 /* ty=int64 */, meta[relay.Constant][11] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][4]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%22 = fn (%p04: Tensor[(1, 56, 56, 64), float32], %p14: Tensor[(3, 3, 64, 64), float32], %p23: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%20 = nn.conv2d(%p04, %p14, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%21 = add(%20, %p23) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%21) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%23 = (%x7, meta[relay.Constant][12] /* ty=Tensor[(3, 3, 64, 64), float32] */, meta[relay.Constant][13] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%24 = (%tensor_04,);
let %x8: () = vm.invoke_tvm_op(%22, %23, %24) /* ty=() */;
let %x9: Tensor[(1, 56, 56, 64), float32] = %tensor_04;
let %storage_05: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][5]) /* ty=Storage[] */;
let %tensor_05: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_05, 0 /* ty=int64 */, meta[relay.Constant][14] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][5]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%25 = fn (%p05: Tensor[(1, 56, 56, 64), float32], %p15: Tensor[(1, 1, 64, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
nn.conv2d(%p05, %p15, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%26 = (%x5, meta[relay.Constant][15] /* ty=Tensor[(1, 1, 64, 256), float32] */);
%27 = (%tensor_05,);
let %x10: () = vm.invoke_tvm_op(%25, %26, %27) /* ty=() */;
let %x11: Tensor[(1, 56, 56, 256), float32] = %tensor_05;
let %storage_06: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][6]) /* ty=Storage[] */;
let %tensor_06: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_06, 0 /* ty=int64 */, meta[relay.Constant][16] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][6]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%29 = fn (%p06: Tensor[(1, 56, 56, 64), float32], %p16: Tensor[(1, 1, 64, 256), float32], %p24: Tensor[(1, 56, 56, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
%28 = nn.conv2d(%p06, %p16, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */;
add(%28, %p24) /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%30 = (%x9, meta[relay.Constant][17] /* ty=Tensor[(1, 1, 64, 256), float32] */, %x11);
%31 = (%tensor_06,);
let %x12: () = vm.invoke_tvm_op(%29, %30, %31) /* ty=() */;
let %x13: Tensor[(1, 56, 56, 256), float32] = %tensor_06;
let %storage_07: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][7]) /* ty=Storage[] */;
let %tensor_07: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_07, 0 /* ty=int64 */, meta[relay.Constant][18] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][7]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%33 = fn (%p07: Tensor[(1, 56, 56, 256), float32], %p17: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
%32 = add(%p07, %p17) /* ty=Tensor[(1, 56, 56, 256), float32] */;
nn.relu(%32) /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%34 = (%x13, meta[relay.Constant][19] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%35 = (%tensor_07,);
let %x14: () = vm.invoke_tvm_op(%33, %34, %35) /* ty=() */;
let %x15: Tensor[(1, 56, 56, 256), float32] = %tensor_07;
let %storage_08: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][8]) /* ty=Storage[] */;
let %tensor_08: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_08, 0 /* ty=int64 */, meta[relay.Constant][20] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][8]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%38 = fn (%p08: Tensor[(1, 56, 56, 256), float32], %p18: Tensor[(1, 1, 256, 64), float32], %p25: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%36 = nn.conv2d(%p08, %p18, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%37 = add(%36, %p25) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%37) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%39 = (%x15, meta[relay.Constant][21] /* ty=Tensor[(1, 1, 256, 64), float32] */, meta[relay.Constant][22] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%40 = (%tensor_08,);
let %x16: () = vm.invoke_tvm_op(%38, %39, %40) /* ty=() */;
let %x17: Tensor[(1, 56, 56, 64), float32] = %tensor_08;
let %storage_09: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][9]) /* ty=Storage[] */;
let %tensor_09: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_09, 0 /* ty=int64 */, meta[relay.Constant][23] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][9]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%43 = fn (%p09: Tensor[(1, 56, 56, 64), float32], %p19: Tensor[(3, 3, 64, 64), float32], %p26: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%41 = nn.conv2d(%p09, %p19, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%42 = add(%41, %p26) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%42) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%44 = (%x17, meta[relay.Constant][24] /* ty=Tensor[(3, 3, 64, 64), float32] */, meta[relay.Constant][25] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%45 = (%tensor_09,);
let %x18: () = vm.invoke_tvm_op(%43, %44, %45) /* ty=() */;
let %x19: Tensor[(1, 56, 56, 64), float32] = %tensor_09;
let %storage_010: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][10]) /* ty=Storage[] */;
let %tensor_010: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_010, 0 /* ty=int64 */, meta[relay.Constant][26] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][10]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%47 = fn (%p010: Tensor[(1, 56, 56, 64), float32], %p110: Tensor[(1, 1, 64, 256), float32], %p27: Tensor[(1, 56, 56, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
%46 = nn.conv2d(%p010, %p110, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */;
add(%46, %p27) /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%48 = (%x19, meta[relay.Constant][27] /* ty=Tensor[(1, 1, 64, 256), float32] */, %x13);
%49 = (%tensor_010,);
let %x20: () = vm.invoke_tvm_op(%47, %48, %49) /* ty=() */;
let %x21: Tensor[(1, 56, 56, 256), float32] = %tensor_010;
let %storage_011: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][11]) /* ty=Storage[] */;
let %tensor_011: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_011, 0 /* ty=int64 */, meta[relay.Constant][28] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][11]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%51 = fn (%p011: Tensor[(1, 56, 56, 256), float32], %p111: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
%50 = add(%p011, %p111) /* ty=Tensor[(1, 56, 56, 256), float32] */;
nn.relu(%50) /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%52 = (%x21, meta[relay.Constant][29] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%53 = (%tensor_011,);
let %x22: () = vm.invoke_tvm_op(%51, %52, %53) /* ty=() */;
let %x23: Tensor[(1, 56, 56, 256), float32] = %tensor_011;
let %storage_012: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][12]) /* ty=Storage[] */;
let %tensor_012: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_012, 0 /* ty=int64 */, meta[relay.Constant][30] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][12]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%56 = fn (%p012: Tensor[(1, 56, 56, 256), float32], %p112: Tensor[(1, 1, 256, 64), float32], %p28: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%54 = nn.conv2d(%p012, %p112, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%55 = add(%54, %p28) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%55) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%57 = (%x23, meta[relay.Constant][31] /* ty=Tensor[(1, 1, 256, 64), float32] */, meta[relay.Constant][32] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%58 = (%tensor_012,);
let %x24: () = vm.invoke_tvm_op(%56, %57, %58) /* ty=() */;
let %x25: Tensor[(1, 56, 56, 64), float32] = %tensor_012;
let %storage_013: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][13]) /* ty=Storage[] */;
let %tensor_013: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_013, 0 /* ty=int64 */, meta[relay.Constant][33] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][13]) /* ty=Tensor[(1, 56, 56, 64), float32] */;
%61 = fn (%p013: Tensor[(1, 56, 56, 64), float32], %p113: Tensor[(3, 3, 64, 64), float32], %p29: Tensor[(1, 1, 1, 64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] {
%59 = nn.conv2d(%p013, %p113, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */;
%60 = add(%59, %p29) /* ty=Tensor[(1, 56, 56, 64), float32] */;
nn.relu(%60) /* ty=Tensor[(1, 56, 56, 64), float32] */
};
%62 = (%x25, meta[relay.Constant][34] /* ty=Tensor[(3, 3, 64, 64), float32] */, meta[relay.Constant][35] /* ty=Tensor[(1, 1, 1, 64), float32] */);
%63 = (%tensor_013,);
let %x26: () = vm.invoke_tvm_op(%61, %62, %63) /* ty=() */;
let %x27: Tensor[(1, 56, 56, 64), float32] = %tensor_013;
let %storage_014: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][14]) /* ty=Storage[] */;
let %tensor_014: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_014, 0 /* ty=int64 */, meta[relay.Constant][36] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][14]) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%67 = fn (%p014: Tensor[(1, 56, 56, 64), float32], %p114: Tensor[(1, 1, 64, 256), float32], %p210: Tensor[(1, 56, 56, 256), float32], %p3: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] {
%64 = nn.conv2d(%p014, %p114, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */;
%65 = add(%64, %p210) /* ty=Tensor[(1, 56, 56, 256), float32] */;
%66 = add(%65, %p3) /* ty=Tensor[(1, 56, 56, 256), float32] */;
nn.relu(%66) /* ty=Tensor[(1, 56, 56, 256), float32] */
};
%68 = (%x27, meta[relay.Constant][37] /* ty=Tensor[(1, 1, 64, 256), float32] */, %x21, meta[relay.Constant][38] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%69 = (%tensor_014,);
let %x28: () = vm.invoke_tvm_op(%67, %68, %69) /* ty=() */;
let %x29: Tensor[(1, 56, 56, 256), float32] = %tensor_014;
let %storage_015: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][15]) /* ty=Storage[] */;
let %tensor_015: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_015, 0 /* ty=int64 */, meta[relay.Constant][39] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][15]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%72 = fn (%p015: Tensor[(1, 56, 56, 256), float32], %p115: Tensor[(1, 1, 256, 128), float32], %p211: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%70 = nn.conv2d(%p015, %p115, strides=[2, 2], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%71 = add(%70, %p211) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%71) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%73 = (%x29, meta[relay.Constant][40] /* ty=Tensor[(1, 1, 256, 128), float32] */, meta[relay.Constant][41] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%74 = (%tensor_015,);
let %x30: () = vm.invoke_tvm_op(%72, %73, %74) /* ty=() */;
let %x31: Tensor[(1, 28, 28, 128), float32] = %tensor_015;
let %storage_016: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][16]) /* ty=Storage[] */;
let %tensor_016: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_016, 0 /* ty=int64 */, meta[relay.Constant][42] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][16]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%77 = fn (%p016: Tensor[(1, 28, 28, 128), float32], %p116: Tensor[(3, 3, 128, 128), float32], %p212: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%75 = nn.conv2d(%p016, %p116, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%76 = add(%75, %p212) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%76) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%78 = (%x31, meta[relay.Constant][43] /* ty=Tensor[(3, 3, 128, 128), float32] */, meta[relay.Constant][44] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%79 = (%tensor_016,);
let %x32: () = vm.invoke_tvm_op(%77, %78, %79) /* ty=() */;
let %x33: Tensor[(1, 28, 28, 128), float32] = %tensor_016;
let %storage_017: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][17]) /* ty=Storage[] */;
let %tensor_017: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_017, 0 /* ty=int64 */, meta[relay.Constant][45] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][17]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%80 = fn (%p017: Tensor[(1, 56, 56, 256), float32], %p117: Tensor[(1, 1, 256, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
nn.conv2d(%p017, %p117, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%81 = (%x29, meta[relay.Constant][46] /* ty=Tensor[(1, 1, 256, 512), float32] */);
%82 = (%tensor_017,);
let %x34: () = vm.invoke_tvm_op(%80, %81, %82) /* ty=() */;
let %x35: Tensor[(1, 28, 28, 512), float32] = %tensor_017;
let %storage_018: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][18]) /* ty=Storage[] */;
let %tensor_018: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_018, 0 /* ty=int64 */, meta[relay.Constant][47] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][18]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%84 = fn (%p018: Tensor[(1, 28, 28, 128), float32], %p118: Tensor[(1, 1, 128, 512), float32], %p213: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%83 = nn.conv2d(%p018, %p118, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */;
add(%83, %p213) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%85 = (%x33, meta[relay.Constant][48] /* ty=Tensor[(1, 1, 128, 512), float32] */, %x35);
%86 = (%tensor_018,);
let %x36: () = vm.invoke_tvm_op(%84, %85, %86) /* ty=() */;
let %x37: Tensor[(1, 28, 28, 512), float32] = %tensor_018;
let %storage_019: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][19]) /* ty=Storage[] */;
let %tensor_019: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_019, 0 /* ty=int64 */, meta[relay.Constant][49] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][19]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%88 = fn (%p019: Tensor[(1, 28, 28, 512), float32], %p119: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%87 = add(%p019, %p119) /* ty=Tensor[(1, 28, 28, 512), float32] */;
nn.relu(%87) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%89 = (%x37, meta[relay.Constant][50] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%90 = (%tensor_019,);
let %x38: () = vm.invoke_tvm_op(%88, %89, %90) /* ty=() */;
let %x39: Tensor[(1, 28, 28, 512), float32] = %tensor_019;
let %storage_020: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][20]) /* ty=Storage[] */;
let %tensor_020: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_020, 0 /* ty=int64 */, meta[relay.Constant][51] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][20]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%93 = fn (%p020: Tensor[(1, 28, 28, 512), float32], %p120: Tensor[(1, 1, 512, 128), float32], %p214: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%91 = nn.conv2d(%p020, %p120, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%92 = add(%91, %p214) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%92) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%94 = (%x39, meta[relay.Constant][52] /* ty=Tensor[(1, 1, 512, 128), float32] */, meta[relay.Constant][53] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%95 = (%tensor_020,);
let %x40: () = vm.invoke_tvm_op(%93, %94, %95) /* ty=() */;
let %x41: Tensor[(1, 28, 28, 128), float32] = %tensor_020;
let %storage_021: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][21]) /* ty=Storage[] */;
let %tensor_021: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_021, 0 /* ty=int64 */, meta[relay.Constant][54] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][21]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%98 = fn (%p021: Tensor[(1, 28, 28, 128), float32], %p121: Tensor[(3, 3, 128, 128), float32], %p215: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%96 = nn.conv2d(%p021, %p121, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%97 = add(%96, %p215) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%97) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%99 = (%x41, meta[relay.Constant][55] /* ty=Tensor[(3, 3, 128, 128), float32] */, meta[relay.Constant][56] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%100 = (%tensor_021,);
let %x42: () = vm.invoke_tvm_op(%98, %99, %100) /* ty=() */;
let %x43: Tensor[(1, 28, 28, 128), float32] = %tensor_021;
let %storage_022: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][22]) /* ty=Storage[] */;
let %tensor_022: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_022, 0 /* ty=int64 */, meta[relay.Constant][57] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][22]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%102 = fn (%p022: Tensor[(1, 28, 28, 128), float32], %p122: Tensor[(1, 1, 128, 512), float32], %p216: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%101 = nn.conv2d(%p022, %p122, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */;
add(%101, %p216) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%103 = (%x43, meta[relay.Constant][58] /* ty=Tensor[(1, 1, 128, 512), float32] */, %x37);
%104 = (%tensor_022,);
let %x44: () = vm.invoke_tvm_op(%102, %103, %104) /* ty=() */;
let %x45: Tensor[(1, 28, 28, 512), float32] = %tensor_022;
let %storage_023: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][23]) /* ty=Storage[] */;
let %tensor_023: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_023, 0 /* ty=int64 */, meta[relay.Constant][59] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][23]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%106 = fn (%p023: Tensor[(1, 28, 28, 512), float32], %p123: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%105 = add(%p023, %p123) /* ty=Tensor[(1, 28, 28, 512), float32] */;
nn.relu(%105) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%107 = (%x45, meta[relay.Constant][60] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%108 = (%tensor_023,);
let %x46: () = vm.invoke_tvm_op(%106, %107, %108) /* ty=() */;
let %x47: Tensor[(1, 28, 28, 512), float32] = %tensor_023;
let %storage_024: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][24]) /* ty=Storage[] */;
let %tensor_024: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_024, 0 /* ty=int64 */, meta[relay.Constant][61] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][24]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%111 = fn (%p024: Tensor[(1, 28, 28, 512), float32], %p124: Tensor[(1, 1, 512, 128), float32], %p217: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%109 = nn.conv2d(%p024, %p124, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%110 = add(%109, %p217) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%110) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%112 = (%x47, meta[relay.Constant][62] /* ty=Tensor[(1, 1, 512, 128), float32] */, meta[relay.Constant][63] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%113 = (%tensor_024,);
let %x48: () = vm.invoke_tvm_op(%111, %112, %113) /* ty=() */;
let %x49: Tensor[(1, 28, 28, 128), float32] = %tensor_024;
let %storage_025: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][25]) /* ty=Storage[] */;
let %tensor_025: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_025, 0 /* ty=int64 */, meta[relay.Constant][64] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][25]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%116 = fn (%p025: Tensor[(1, 28, 28, 128), float32], %p125: Tensor[(3, 3, 128, 128), float32], %p218: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%114 = nn.conv2d(%p025, %p125, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%115 = add(%114, %p218) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%115) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%117 = (%x49, meta[relay.Constant][65] /* ty=Tensor[(3, 3, 128, 128), float32] */, meta[relay.Constant][66] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%118 = (%tensor_025,);
let %x50: () = vm.invoke_tvm_op(%116, %117, %118) /* ty=() */;
let %x51: Tensor[(1, 28, 28, 128), float32] = %tensor_025;
let %storage_026: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][26]) /* ty=Storage[] */;
let %tensor_026: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_026, 0 /* ty=int64 */, meta[relay.Constant][67] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][26]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%120 = fn (%p026: Tensor[(1, 28, 28, 128), float32], %p126: Tensor[(1, 1, 128, 512), float32], %p219: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%119 = nn.conv2d(%p026, %p126, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */;
add(%119, %p219) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%121 = (%x51, meta[relay.Constant][68] /* ty=Tensor[(1, 1, 128, 512), float32] */, %x45);
%122 = (%tensor_026,);
let %x52: () = vm.invoke_tvm_op(%120, %121, %122) /* ty=() */;
let %x53: Tensor[(1, 28, 28, 512), float32] = %tensor_026;
let %storage_027: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][27]) /* ty=Storage[] */;
let %tensor_027: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_027, 0 /* ty=int64 */, meta[relay.Constant][69] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][27]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%124 = fn (%p027: Tensor[(1, 28, 28, 512), float32], %p127: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%123 = add(%p027, %p127) /* ty=Tensor[(1, 28, 28, 512), float32] */;
nn.relu(%123) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%125 = (%x53, meta[relay.Constant][70] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%126 = (%tensor_027,);
let %x54: () = vm.invoke_tvm_op(%124, %125, %126) /* ty=() */;
let %x55: Tensor[(1, 28, 28, 512), float32] = %tensor_027;
let %storage_028: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][28]) /* ty=Storage[] */;
let %tensor_028: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_028, 0 /* ty=int64 */, meta[relay.Constant][71] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][28]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%129 = fn (%p028: Tensor[(1, 28, 28, 512), float32], %p128: Tensor[(1, 1, 512, 128), float32], %p220: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%127 = nn.conv2d(%p028, %p128, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%128 = add(%127, %p220) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%128) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%130 = (%x55, meta[relay.Constant][72] /* ty=Tensor[(1, 1, 512, 128), float32] */, meta[relay.Constant][73] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%131 = (%tensor_028,);
let %x56: () = vm.invoke_tvm_op(%129, %130, %131) /* ty=() */;
let %x57: Tensor[(1, 28, 28, 128), float32] = %tensor_028;
let %storage_029: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][29]) /* ty=Storage[] */;
let %tensor_029: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_029, 0 /* ty=int64 */, meta[relay.Constant][74] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][29]) /* ty=Tensor[(1, 28, 28, 128), float32] */;
%134 = fn (%p029: Tensor[(1, 28, 28, 128), float32], %p129: Tensor[(3, 3, 128, 128), float32], %p221: Tensor[(1, 1, 1, 128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] {
%132 = nn.conv2d(%p029, %p129, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */;
%133 = add(%132, %p221) /* ty=Tensor[(1, 28, 28, 128), float32] */;
nn.relu(%133) /* ty=Tensor[(1, 28, 28, 128), float32] */
};
%135 = (%x57, meta[relay.Constant][75] /* ty=Tensor[(3, 3, 128, 128), float32] */, meta[relay.Constant][76] /* ty=Tensor[(1, 1, 1, 128), float32] */);
%136 = (%tensor_029,);
let %x58: () = vm.invoke_tvm_op(%134, %135, %136) /* ty=() */;
let %x59: Tensor[(1, 28, 28, 128), float32] = %tensor_029;
let %storage_030: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][30]) /* ty=Storage[] */;
let %tensor_030: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_030, 0 /* ty=int64 */, meta[relay.Constant][77] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][30]) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%140 = fn (%p030: Tensor[(1, 28, 28, 128), float32], %p130: Tensor[(1, 1, 128, 512), float32], %p222: Tensor[(1, 28, 28, 512), float32], %p31: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] {
%137 = nn.conv2d(%p030, %p130, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */;
%138 = add(%137, %p222) /* ty=Tensor[(1, 28, 28, 512), float32] */;
%139 = add(%138, %p31) /* ty=Tensor[(1, 28, 28, 512), float32] */;
nn.relu(%139) /* ty=Tensor[(1, 28, 28, 512), float32] */
};
%141 = (%x59, meta[relay.Constant][78] /* ty=Tensor[(1, 1, 128, 512), float32] */, %x53, meta[relay.Constant][79] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%142 = (%tensor_030,);
let %x60: () = vm.invoke_tvm_op(%140, %141, %142) /* ty=() */;
let %x61: Tensor[(1, 28, 28, 512), float32] = %tensor_030;
let %storage_031: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][31]) /* ty=Storage[] */;
let %tensor_031: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_031, 0 /* ty=int64 */, meta[relay.Constant][80] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][31]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%145 = fn (%p031: Tensor[(1, 28, 28, 512), float32], %p131: Tensor[(1, 1, 512, 256), float32], %p223: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%143 = nn.conv2d(%p031, %p131, strides=[2, 2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%144 = add(%143, %p223) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%144) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%146 = (%x61, meta[relay.Constant][81] /* ty=Tensor[(1, 1, 512, 256), float32] */, meta[relay.Constant][82] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%147 = (%tensor_031,);
let %x62: () = vm.invoke_tvm_op(%145, %146, %147) /* ty=() */;
let %x63: Tensor[(1, 14, 14, 256), float32] = %tensor_031;
let %storage_032: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][32]) /* ty=Storage[] */;
let %tensor_032: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_032, 0 /* ty=int64 */, meta[relay.Constant][83] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][32]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%150 = fn (%p032: Tensor[(1, 14, 14, 256), float32], %p132: Tensor[(3, 3, 256, 256), float32], %p224: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%148 = nn.conv2d(%p032, %p132, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%149 = add(%148, %p224) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%149) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%151 = (%x63, meta[relay.Constant][84] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][85] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%152 = (%tensor_032,);
let %x64: () = vm.invoke_tvm_op(%150, %151, %152) /* ty=() */;
let %x65: Tensor[(1, 14, 14, 256), float32] = %tensor_032;
let %storage_033: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][33]) /* ty=Storage[] */;
let %tensor_033: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_033, 0 /* ty=int64 */, meta[relay.Constant][86] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][33]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%153 = fn (%p033: Tensor[(1, 28, 28, 512), float32], %p133: Tensor[(1, 1, 512, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
nn.conv2d(%p033, %p133, strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%154 = (%x61, meta[relay.Constant][87] /* ty=Tensor[(1, 1, 512, 1024), float32] */);
%155 = (%tensor_033,);
let %x66: () = vm.invoke_tvm_op(%153, %154, %155) /* ty=() */;
let %x67: Tensor[(1, 14, 14, 1024), float32] = %tensor_033;
let %storage_034: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][34]) /* ty=Storage[] */;
let %tensor_034: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_034, 0 /* ty=int64 */, meta[relay.Constant][88] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][34]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%157 = fn (%p034: Tensor[(1, 14, 14, 256), float32], %p134: Tensor[(1, 1, 256, 1024), float32], %p225: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%156 = nn.conv2d(%p034, %p134, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
add(%156, %p225) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%158 = (%x65, meta[relay.Constant][89] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x67);
%159 = (%tensor_034,);
let %x68: () = vm.invoke_tvm_op(%157, %158, %159) /* ty=() */;
let %x69: Tensor[(1, 14, 14, 1024), float32] = %tensor_034;
let %storage_035: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][35]) /* ty=Storage[] */;
let %tensor_035: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_035, 0 /* ty=int64 */, meta[relay.Constant][90] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][35]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%161 = fn (%p035: Tensor[(1, 14, 14, 1024), float32], %p135: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%160 = add(%p035, %p135) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%160) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%162 = (%x69, meta[relay.Constant][91] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%163 = (%tensor_035,);
let %x70: () = vm.invoke_tvm_op(%161, %162, %163) /* ty=() */;
let %x71: Tensor[(1, 14, 14, 1024), float32] = %tensor_035;
let %storage_036: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][36]) /* ty=Storage[] */;
let %tensor_036: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_036, 0 /* ty=int64 */, meta[relay.Constant][92] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][36]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%166 = fn (%p036: Tensor[(1, 14, 14, 1024), float32], %p136: Tensor[(1, 1, 1024, 256), float32], %p226: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%164 = nn.conv2d(%p036, %p136, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%165 = add(%164, %p226) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%165) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%167 = (%x71, meta[relay.Constant][93] /* ty=Tensor[(1, 1, 1024, 256), float32] */, meta[relay.Constant][94] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%168 = (%tensor_036,);
let %x72: () = vm.invoke_tvm_op(%166, %167, %168) /* ty=() */;
let %x73: Tensor[(1, 14, 14, 256), float32] = %tensor_036;
let %storage_037: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][37]) /* ty=Storage[] */;
let %tensor_037: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_037, 0 /* ty=int64 */, meta[relay.Constant][95] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][37]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%171 = fn (%p037: Tensor[(1, 14, 14, 256), float32], %p137: Tensor[(3, 3, 256, 256), float32], %p227: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%169 = nn.conv2d(%p037, %p137, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%170 = add(%169, %p227) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%170) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%172 = (%x73, meta[relay.Constant][96] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][97] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%173 = (%tensor_037,);
let %x74: () = vm.invoke_tvm_op(%171, %172, %173) /* ty=() */;
let %x75: Tensor[(1, 14, 14, 256), float32] = %tensor_037;
let %storage_038: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][38]) /* ty=Storage[] */;
let %tensor_038: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_038, 0 /* ty=int64 */, meta[relay.Constant][98] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][38]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%175 = fn (%p038: Tensor[(1, 14, 14, 256), float32], %p138: Tensor[(1, 1, 256, 1024), float32], %p228: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%174 = nn.conv2d(%p038, %p138, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
add(%174, %p228) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%176 = (%x75, meta[relay.Constant][99] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x69);
%177 = (%tensor_038,);
let %x76: () = vm.invoke_tvm_op(%175, %176, %177) /* ty=() */;
let %x77: Tensor[(1, 14, 14, 1024), float32] = %tensor_038;
let %storage_039: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][39]) /* ty=Storage[] */;
let %tensor_039: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_039, 0 /* ty=int64 */, meta[relay.Constant][100] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][39]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%179 = fn (%p039: Tensor[(1, 14, 14, 1024), float32], %p139: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%178 = add(%p039, %p139) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%178) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%180 = (%x77, meta[relay.Constant][101] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%181 = (%tensor_039,);
let %x78: () = vm.invoke_tvm_op(%179, %180, %181) /* ty=() */;
let %x79: Tensor[(1, 14, 14, 1024), float32] = %tensor_039;
let %storage_040: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][40]) /* ty=Storage[] */;
let %tensor_040: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_040, 0 /* ty=int64 */, meta[relay.Constant][102] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][40]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%184 = fn (%p040: Tensor[(1, 14, 14, 1024), float32], %p140: Tensor[(1, 1, 1024, 256), float32], %p229: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%182 = nn.conv2d(%p040, %p140, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%183 = add(%182, %p229) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%183) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%185 = (%x79, meta[relay.Constant][103] /* ty=Tensor[(1, 1, 1024, 256), float32] */, meta[relay.Constant][104] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%186 = (%tensor_040,);
let %x80: () = vm.invoke_tvm_op(%184, %185, %186) /* ty=() */;
let %x81: Tensor[(1, 14, 14, 256), float32] = %tensor_040;
let %storage_041: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][41]) /* ty=Storage[] */;
let %tensor_041: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_041, 0 /* ty=int64 */, meta[relay.Constant][105] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][41]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%189 = fn (%p041: Tensor[(1, 14, 14, 256), float32], %p141: Tensor[(3, 3, 256, 256), float32], %p230: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%187 = nn.conv2d(%p041, %p141, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%188 = add(%187, %p230) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%188) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%190 = (%x81, meta[relay.Constant][106] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][107] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%191 = (%tensor_041,);
let %x82: () = vm.invoke_tvm_op(%189, %190, %191) /* ty=() */;
let %x83: Tensor[(1, 14, 14, 256), float32] = %tensor_041;
let %storage_042: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][42]) /* ty=Storage[] */;
let %tensor_042: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_042, 0 /* ty=int64 */, meta[relay.Constant][108] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][42]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%193 = fn (%p042: Tensor[(1, 14, 14, 256), float32], %p142: Tensor[(1, 1, 256, 1024), float32], %p231: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%192 = nn.conv2d(%p042, %p142, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
add(%192, %p231) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%194 = (%x83, meta[relay.Constant][109] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x77);
%195 = (%tensor_042,);
let %x84: () = vm.invoke_tvm_op(%193, %194, %195) /* ty=() */;
let %x85: Tensor[(1, 14, 14, 1024), float32] = %tensor_042;
let %storage_043: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][43]) /* ty=Storage[] */;
let %tensor_043: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_043, 0 /* ty=int64 */, meta[relay.Constant][110] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][43]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%197 = fn (%p043: Tensor[(1, 14, 14, 1024), float32], %p143: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%196 = add(%p043, %p143) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%196) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%198 = (%x85, meta[relay.Constant][111] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%199 = (%tensor_043,);
let %x86: () = vm.invoke_tvm_op(%197, %198, %199) /* ty=() */;
let %x87: Tensor[(1, 14, 14, 1024), float32] = %tensor_043;
let %storage_044: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][44]) /* ty=Storage[] */;
let %tensor_044: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_044, 0 /* ty=int64 */, meta[relay.Constant][112] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][44]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%202 = fn (%p044: Tensor[(1, 14, 14, 1024), float32], %p144: Tensor[(1, 1, 1024, 256), float32], %p232: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%200 = nn.conv2d(%p044, %p144, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%201 = add(%200, %p232) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%201) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%203 = (%x87, meta[relay.Constant][113] /* ty=Tensor[(1, 1, 1024, 256), float32] */, meta[relay.Constant][114] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%204 = (%tensor_044,);
let %x88: () = vm.invoke_tvm_op(%202, %203, %204) /* ty=() */;
let %x89: Tensor[(1, 14, 14, 256), float32] = %tensor_044;
let %storage_045: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][45]) /* ty=Storage[] */;
let %tensor_045: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_045, 0 /* ty=int64 */, meta[relay.Constant][115] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][45]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%207 = fn (%p045: Tensor[(1, 14, 14, 256), float32], %p145: Tensor[(3, 3, 256, 256), float32], %p233: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%205 = nn.conv2d(%p045, %p145, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%206 = add(%205, %p233) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%206) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%208 = (%x89, meta[relay.Constant][116] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][117] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%209 = (%tensor_045,);
let %x90: () = vm.invoke_tvm_op(%207, %208, %209) /* ty=() */;
let %x91: Tensor[(1, 14, 14, 256), float32] = %tensor_045;
let %storage_046: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][46]) /* ty=Storage[] */;
let %tensor_046: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_046, 0 /* ty=int64 */, meta[relay.Constant][118] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][46]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%211 = fn (%p046: Tensor[(1, 14, 14, 256), float32], %p146: Tensor[(1, 1, 256, 1024), float32], %p234: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%210 = nn.conv2d(%p046, %p146, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
add(%210, %p234) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%212 = (%x91, meta[relay.Constant][119] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x85);
%213 = (%tensor_046,);
let %x92: () = vm.invoke_tvm_op(%211, %212, %213) /* ty=() */;
let %x93: Tensor[(1, 14, 14, 1024), float32] = %tensor_046;
let %storage_047: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][47]) /* ty=Storage[] */;
let %tensor_047: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_047, 0 /* ty=int64 */, meta[relay.Constant][120] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][47]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%215 = fn (%p047: Tensor[(1, 14, 14, 1024), float32], %p147: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%214 = add(%p047, %p147) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%214) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%216 = (%x93, meta[relay.Constant][121] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%217 = (%tensor_047,);
let %x94: () = vm.invoke_tvm_op(%215, %216, %217) /* ty=() */;
let %x95: Tensor[(1, 14, 14, 1024), float32] = %tensor_047;
let %storage_048: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][48]) /* ty=Storage[] */;
let %tensor_048: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_048, 0 /* ty=int64 */, meta[relay.Constant][122] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][48]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%220 = fn (%p048: Tensor[(1, 14, 14, 1024), float32], %p148: Tensor[(1, 1, 1024, 256), float32], %p235: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%218 = nn.conv2d(%p048, %p148, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%219 = add(%218, %p235) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%219) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%221 = (%x95, meta[relay.Constant][123] /* ty=Tensor[(1, 1, 1024, 256), float32] */, meta[relay.Constant][124] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%222 = (%tensor_048,);
let %x96: () = vm.invoke_tvm_op(%220, %221, %222) /* ty=() */;
let %x97: Tensor[(1, 14, 14, 256), float32] = %tensor_048;
let %storage_049: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][49]) /* ty=Storage[] */;
let %tensor_049: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_049, 0 /* ty=int64 */, meta[relay.Constant][125] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][49]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%225 = fn (%p049: Tensor[(1, 14, 14, 256), float32], %p149: Tensor[(3, 3, 256, 256), float32], %p236: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%223 = nn.conv2d(%p049, %p149, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%224 = add(%223, %p236) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%224) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%226 = (%x97, meta[relay.Constant][126] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][127] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%227 = (%tensor_049,);
let %x98: () = vm.invoke_tvm_op(%225, %226, %227) /* ty=() */;
let %x99: Tensor[(1, 14, 14, 256), float32] = %tensor_049;
let %storage_050: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][50]) /* ty=Storage[] */;
let %tensor_050: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_050, 0 /* ty=int64 */, meta[relay.Constant][128] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][50]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%229 = fn (%p050: Tensor[(1, 14, 14, 256), float32], %p150: Tensor[(1, 1, 256, 1024), float32], %p237: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%228 = nn.conv2d(%p050, %p150, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
add(%228, %p237) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%230 = (%x99, meta[relay.Constant][129] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x93);
%231 = (%tensor_050,);
let %x100: () = vm.invoke_tvm_op(%229, %230, %231) /* ty=() */;
let %x101: Tensor[(1, 14, 14, 1024), float32] = %tensor_050;
let %storage_051: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][51]) /* ty=Storage[] */;
let %tensor_051: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_051, 0 /* ty=int64 */, meta[relay.Constant][130] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][51]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%233 = fn (%p051: Tensor[(1, 14, 14, 1024), float32], %p151: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%232 = add(%p051, %p151) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%232) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%234 = (%x101, meta[relay.Constant][131] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%235 = (%tensor_051,);
let %x102: () = vm.invoke_tvm_op(%233, %234, %235) /* ty=() */;
let %x103: Tensor[(1, 14, 14, 1024), float32] = %tensor_051;
let %storage_052: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][52]) /* ty=Storage[] */;
let %tensor_052: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_052, 0 /* ty=int64 */, meta[relay.Constant][132] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][52]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%238 = fn (%p052: Tensor[(1, 14, 14, 1024), float32], %p152: Tensor[(1, 1, 1024, 256), float32], %p238: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%236 = nn.conv2d(%p052, %p152, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%237 = add(%236, %p238) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%237) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%239 = (%x103, meta[relay.Constant][133] /* ty=Tensor[(1, 1, 1024, 256), float32] */, meta[relay.Constant][134] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%240 = (%tensor_052,);
let %x104: () = vm.invoke_tvm_op(%238, %239, %240) /* ty=() */;
let %x105: Tensor[(1, 14, 14, 256), float32] = %tensor_052;
let %storage_053: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][53]) /* ty=Storage[] */;
let %tensor_053: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_053, 0 /* ty=int64 */, meta[relay.Constant][135] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][53]) /* ty=Tensor[(1, 14, 14, 256), float32] */;
%243 = fn (%p053: Tensor[(1, 14, 14, 256), float32], %p153: Tensor[(3, 3, 256, 256), float32], %p239: Tensor[(1, 1, 1, 256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] {
%241 = nn.conv2d(%p053, %p153, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */;
%242 = add(%241, %p239) /* ty=Tensor[(1, 14, 14, 256), float32] */;
nn.relu(%242) /* ty=Tensor[(1, 14, 14, 256), float32] */
};
%244 = (%x105, meta[relay.Constant][136] /* ty=Tensor[(3, 3, 256, 256), float32] */, meta[relay.Constant][137] /* ty=Tensor[(1, 1, 1, 256), float32] */);
%245 = (%tensor_053,);
let %x106: () = vm.invoke_tvm_op(%243, %244, %245) /* ty=() */;
let %x107: Tensor[(1, 14, 14, 256), float32] = %tensor_053;
let %storage_054: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][54]) /* ty=Storage[] */;
let %tensor_054: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_054, 0 /* ty=int64 */, meta[relay.Constant][138] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][54]) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%249 = fn (%p054: Tensor[(1, 14, 14, 256), float32], %p154: Tensor[(1, 1, 256, 1024), float32], %p240: Tensor[(1, 14, 14, 1024), float32], %p32: Tensor[(1, 1, 1, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] {
%246 = nn.conv2d(%p054, %p154, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%247 = add(%246, %p240) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
%248 = add(%247, %p32) /* ty=Tensor[(1, 14, 14, 1024), float32] */;
nn.relu(%248) /* ty=Tensor[(1, 14, 14, 1024), float32] */
};
%250 = (%x107, meta[relay.Constant][139] /* ty=Tensor[(1, 1, 256, 1024), float32] */, %x101, meta[relay.Constant][140] /* ty=Tensor[(1, 1, 1, 1024), float32] */);
%251 = (%tensor_054,);
let %x108: () = vm.invoke_tvm_op(%249, %250, %251) /* ty=() */;
let %x109: Tensor[(1, 14, 14, 1024), float32] = %tensor_054;
let %storage_055: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][55]) /* ty=Storage[] */;
let %tensor_055: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_055, 0 /* ty=int64 */, meta[relay.Constant][141] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][55]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%254 = fn (%p055: Tensor[(1, 14, 14, 1024), float32], %p155: Tensor[(1, 1, 1024, 512), float32], %p241: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%252 = nn.conv2d(%p055, %p155, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%253 = add(%252, %p241) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%253) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%255 = (%x109, meta[relay.Constant][142] /* ty=Tensor[(1, 1, 1024, 512), float32] */, meta[relay.Constant][143] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%256 = (%tensor_055,);
let %x110: () = vm.invoke_tvm_op(%254, %255, %256) /* ty=() */;
let %x111: Tensor[(1, 7, 7, 512), float32] = %tensor_055;
let %storage_056: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][56]) /* ty=Storage[] */;
let %tensor_056: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_056, 0 /* ty=int64 */, meta[relay.Constant][144] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][56]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%259 = fn (%p056: Tensor[(1, 7, 7, 512), float32], %p156: Tensor[(3, 3, 512, 512), float32], %p242: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%257 = nn.conv2d(%p056, %p156, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%258 = add(%257, %p242) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%258) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%260 = (%x111, meta[relay.Constant][145] /* ty=Tensor[(3, 3, 512, 512), float32] */, meta[relay.Constant][146] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%261 = (%tensor_056,);
let %x112: () = vm.invoke_tvm_op(%259, %260, %261) /* ty=() */;
let %x113: Tensor[(1, 7, 7, 512), float32] = %tensor_056;
let %storage_057: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][57]) /* ty=Storage[] */;
let %tensor_057: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_057, 0 /* ty=int64 */, meta[relay.Constant][147] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][57]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%262 = fn (%p057: Tensor[(1, 14, 14, 1024), float32], %p157: Tensor[(1, 1, 1024, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
nn.conv2d(%p057, %p157, strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%263 = (%x109, meta[relay.Constant][148] /* ty=Tensor[(1, 1, 1024, 2048), float32] */);
%264 = (%tensor_057,);
let %x114: () = vm.invoke_tvm_op(%262, %263, %264) /* ty=() */;
let %x115: Tensor[(1, 7, 7, 2048), float32] = %tensor_057;
let %storage_058: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][58]) /* ty=Storage[] */;
let %tensor_058: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_058, 0 /* ty=int64 */, meta[relay.Constant][149] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][58]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%266 = fn (%p058: Tensor[(1, 7, 7, 512), float32], %p158: Tensor[(1, 1, 512, 2048), float32], %p243: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
%265 = nn.conv2d(%p058, %p158, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */;
add(%265, %p243) /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%267 = (%x113, meta[relay.Constant][150] /* ty=Tensor[(1, 1, 512, 2048), float32] */, %x115);
%268 = (%tensor_058,);
let %x116: () = vm.invoke_tvm_op(%266, %267, %268) /* ty=() */;
let %x117: Tensor[(1, 7, 7, 2048), float32] = %tensor_058;
let %storage_059: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][59]) /* ty=Storage[] */;
let %tensor_059: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_059, 0 /* ty=int64 */, meta[relay.Constant][151] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][59]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%270 = fn (%p059: Tensor[(1, 7, 7, 2048), float32], %p159: Tensor[(1, 1, 1, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
%269 = add(%p059, %p159) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
nn.relu(%269) /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%271 = (%x117, meta[relay.Constant][152] /* ty=Tensor[(1, 1, 1, 2048), float32] */);
%272 = (%tensor_059,);
let %x118: () = vm.invoke_tvm_op(%270, %271, %272) /* ty=() */;
let %x119: Tensor[(1, 7, 7, 2048), float32] = %tensor_059;
let %storage_060: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][60]) /* ty=Storage[] */;
let %tensor_060: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_060, 0 /* ty=int64 */, meta[relay.Constant][153] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][60]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%275 = fn (%p060: Tensor[(1, 7, 7, 2048), float32], %p160: Tensor[(1, 1, 2048, 512), float32], %p244: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%273 = nn.conv2d(%p060, %p160, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%274 = add(%273, %p244) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%274) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%276 = (%x119, meta[relay.Constant][154] /* ty=Tensor[(1, 1, 2048, 512), float32] */, meta[relay.Constant][155] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%277 = (%tensor_060,);
let %x120: () = vm.invoke_tvm_op(%275, %276, %277) /* ty=() */;
let %x121: Tensor[(1, 7, 7, 512), float32] = %tensor_060;
let %storage_061: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][61]) /* ty=Storage[] */;
let %tensor_061: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_061, 0 /* ty=int64 */, meta[relay.Constant][156] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][61]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%280 = fn (%p061: Tensor[(1, 7, 7, 512), float32], %p161: Tensor[(3, 3, 512, 512), float32], %p245: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%278 = nn.conv2d(%p061, %p161, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%279 = add(%278, %p245) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%279) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%281 = (%x121, meta[relay.Constant][157] /* ty=Tensor[(3, 3, 512, 512), float32] */, meta[relay.Constant][158] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%282 = (%tensor_061,);
let %x122: () = vm.invoke_tvm_op(%280, %281, %282) /* ty=() */;
let %x123: Tensor[(1, 7, 7, 512), float32] = %tensor_061;
let %storage_062: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][62]) /* ty=Storage[] */;
let %tensor_062: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_062, 0 /* ty=int64 */, meta[relay.Constant][159] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][62]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%284 = fn (%p062: Tensor[(1, 7, 7, 512), float32], %p162: Tensor[(1, 1, 512, 2048), float32], %p246: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
%283 = nn.conv2d(%p062, %p162, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */;
add(%283, %p246) /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%285 = (%x123, meta[relay.Constant][160] /* ty=Tensor[(1, 1, 512, 2048), float32] */, %x117);
%286 = (%tensor_062,);
let %x124: () = vm.invoke_tvm_op(%284, %285, %286) /* ty=() */;
let %x125: Tensor[(1, 7, 7, 2048), float32] = %tensor_062;
let %storage_063: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][63]) /* ty=Storage[] */;
let %tensor_063: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_063, 0 /* ty=int64 */, meta[relay.Constant][161] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][63]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%288 = fn (%p063: Tensor[(1, 7, 7, 2048), float32], %p163: Tensor[(1, 1, 1, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
%287 = add(%p063, %p163) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
nn.relu(%287) /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%289 = (%x125, meta[relay.Constant][162] /* ty=Tensor[(1, 1, 1, 2048), float32] */);
%290 = (%tensor_063,);
let %x126: () = vm.invoke_tvm_op(%288, %289, %290) /* ty=() */;
let %x127: Tensor[(1, 7, 7, 2048), float32] = %tensor_063;
let %storage_064: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][64]) /* ty=Storage[] */;
let %tensor_064: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_064, 0 /* ty=int64 */, meta[relay.Constant][163] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][64]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%293 = fn (%p064: Tensor[(1, 7, 7, 2048), float32], %p164: Tensor[(1, 1, 2048, 512), float32], %p247: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%291 = nn.conv2d(%p064, %p164, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%292 = add(%291, %p247) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%292) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%294 = (%x127, meta[relay.Constant][164] /* ty=Tensor[(1, 1, 2048, 512), float32] */, meta[relay.Constant][165] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%295 = (%tensor_064,);
let %x128: () = vm.invoke_tvm_op(%293, %294, %295) /* ty=() */;
let %x129: Tensor[(1, 7, 7, 512), float32] = %tensor_064;
let %storage_065: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][65]) /* ty=Storage[] */;
let %tensor_065: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_065, 0 /* ty=int64 */, meta[relay.Constant][166] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][65]) /* ty=Tensor[(1, 7, 7, 512), float32] */;
%298 = fn (%p065: Tensor[(1, 7, 7, 512), float32], %p165: Tensor[(3, 3, 512, 512), float32], %p248: Tensor[(1, 1, 1, 512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] {
%296 = nn.conv2d(%p065, %p165, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */;
%297 = add(%296, %p248) /* ty=Tensor[(1, 7, 7, 512), float32] */;
nn.relu(%297) /* ty=Tensor[(1, 7, 7, 512), float32] */
};
%299 = (%x129, meta[relay.Constant][167] /* ty=Tensor[(3, 3, 512, 512), float32] */, meta[relay.Constant][168] /* ty=Tensor[(1, 1, 1, 512), float32] */);
%300 = (%tensor_065,);
let %x130: () = vm.invoke_tvm_op(%298, %299, %300) /* ty=() */;
let %x131: Tensor[(1, 7, 7, 512), float32] = %tensor_065;
let %storage_066: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][66]) /* ty=Storage[] */;
let %tensor_066: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_066, 0 /* ty=int64 */, meta[relay.Constant][169] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][66]) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%305 = fn (%p066: Tensor[(1, 7, 7, 512), float32], %p166: Tensor[(1, 1, 512, 2048), float32], %p249: Tensor[(1, 7, 7, 2048), float32], %p33: Tensor[(1, 1, 1, 2048), float32], %p4: Tensor[(1, 1, 1, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] {
%301 = nn.conv2d(%p066, %p166, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%302 = add(%301, %p249) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%303 = multiply(%302, %p33) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
%304 = add(%303, %p4) /* ty=Tensor[(1, 7, 7, 2048), float32] */;
nn.relu(%304) /* ty=Tensor[(1, 7, 7, 2048), float32] */
};
%306 = (%x131, meta[relay.Constant][170] /* ty=Tensor[(1, 1, 512, 2048), float32] */, %x125, meta[relay.Constant][171] /* ty=Tensor[(1, 1, 1, 2048), float32] */, meta[relay.Constant][172] /* ty=Tensor[(1, 1, 1, 2048), float32] */);
%307 = (%tensor_066,);
let %x132: () = vm.invoke_tvm_op(%305, %306, %307) /* ty=() */;
let %x133: Tensor[(1, 7, 7, 2048), float32] = %tensor_066;
let %storage_067: Storage[] = memory.alloc_storage(8192 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][67]) /* ty=Storage[] */;
let %tensor_067: Tensor[(1, 1, 1, 2048), float32] = memory.alloc_tensor(%storage_067, 0 /* ty=int64 */, meta[relay.Constant][173] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][67]) /* ty=Tensor[(1, 1, 1, 2048), float32] */;
%308 = fn (%p067: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 1, 1, 2048), float32] {
nn.global_avg_pool2d(%p067, layout="NHWC") /* ty=Tensor[(1, 1, 1, 2048), float32] */
};
%309 = (%x133,);
%310 = (%tensor_067,);
let %x134: () = vm.invoke_tvm_op(%308, %309, %310) /* ty=() */;
let %x135: Tensor[(1, 1, 1, 2048), float32] = %tensor_067;
let %storage_068: Storage[] = memory.alloc_storage(8192 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][68]) /* ty=Storage[] */;
let %tensor_068: Tensor[(1, 2048), float32] = memory.alloc_tensor(%storage_068, 0 /* ty=int64 */, meta[relay.Constant][174] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][68]) /* ty=Tensor[(1, 2048), float32] */;
%312 = fn (%p068: Tensor[(1, 1, 1, 2048), float32], Primitive=1) -> Tensor[(1, 2048), float32] {
%311 = layout_transform(%p068, src_layout="NHWC", dst_layout="NCHW") /* ty=Tensor[(1, 2048, 1, 1), float32] */;
nn.batch_flatten(%311) /* ty=Tensor[(1, 2048), float32] */
};
%313 = (%x135,);
%314 = (%tensor_068,);
let %x136: () = vm.invoke_tvm_op(%312, %313, %314) /* ty=() */;
let %x137: Tensor[(1, 2048), float32] = %tensor_068;
let %storage_069: Storage[] = memory.alloc_storage(4000 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][69]) /* ty=Storage[] */;
let %tensor_069: Tensor[(1, 1000), float32] = memory.alloc_tensor(%storage_069, 0 /* ty=int64 */, meta[relay.Constant][175] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][69]) /* ty=Tensor[(1, 1000), float32] */;
%316 = fn (%p069: Tensor[(1, 2048), float32], %p167: Tensor[(100, 2048, 10), float32], %p250: Tensor[(1000), float32], Primitive=1) -> Tensor[(1, 1000), float32] {
%315 = nn.contrib_dense_pack(%p069, %p167, units=None, out_dtype="float32") /* ty=Tensor[(1, 1000), float32] */;
add(%315, %p250) /* ty=Tensor[(1, 1000), float32] */
};
%317 = (%x137, meta[relay.Constant][176] /* ty=Tensor[(100, 2048, 10), float32] */, meta[relay.Constant][177] /* ty=Tensor[(1000), float32] */);
%318 = (%tensor_069,);
let %x138: () = vm.invoke_tvm_op(%316, %317, %318) /* ty=() */;
let %x139: Tensor[(1, 1000), float32] = %tensor_069;
let %storage_070: Storage[] = memory.alloc_storage(4000 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][70]) /* ty=Storage[] */;
let %tensor_070: Tensor[(1, 1000), float32] = memory.alloc_tensor(%storage_070, 0 /* ty=int64 */, meta[relay.Constant][178] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][70]) /* ty=Tensor[(1, 1000), float32] */;
%319 = fn (%p070: Tensor[(1, 1000), float32], Primitive=1) -> Tensor[(1, 1000), float32] {
nn.softmax(%p070) /* ty=Tensor[(1, 1000), float32] */
};
%320 = (%x139,);
%321 = (%tensor_070,);
let %x140: () = vm.invoke_tvm_op(%319, %320, %321) /* ty=() */;
let %x141: Tensor[(1, 1000), float32] = %tensor_070;
%x141
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment