module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,3,224,224],f32>) -> (!torch.vtensor<[1,21,224,224],f32>, !torch.vtensor<[1,21,224,224],f32>) attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} {
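// Annotation: %0-%125 below are the network's weights and biases, elided as
// dense_resource blobs. The channel progression (64/256, 128/512, 256/1024,
// 512/2048 bottlenecks) matches a ResNet-50 backbone, and the trailing
// 256x2048x3x3 convs, 256x1280x1x1 projection, and two 21x256x1x1
// classifiers suggest a DeepLabV3-style segmentation head with an auxiliary
// FCN head (21 classes, presumably the Pascal VOC label set).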
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x3x7x7xf32>) : !torch.vtensor<[64,3,7,7],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x1x1xf32>) : !torch.vtensor<[64,64,1,1],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x1x1xf32>) : !torch.vtensor<[64,256,1,1],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x1x1xf32>) : !torch.vtensor<[64,256,1,1],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x1x1xf32>) : !torch.vtensor<[128,256,1,1],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x1x1xf32>) : !torch.vtensor<[512,256,1,1],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x1xf32>) : !torch.vtensor<[256,512,1,1],f32>
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x512x1x1xf32>) : !torch.vtensor<[1024,512,1,1],f32>
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x1024x1x1xf32>) : !torch.vtensor<[512,1024,1,1],f32>
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x1024x1x1xf32>) : !torch.vtensor<[2048,1024,1,1],f32>
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048x1x1xf32>) : !torch.vtensor<[512,2048,1,1],f32>
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048x1x1xf32>) : !torch.vtensor<[512,2048,1,1],f32>
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x2048x1x1xf32>) : !torch.vtensor<[256,2048,1,1],f32>
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x2048x3x3xf32>) : !torch.vtensor<[256,2048,3,3],f32>
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x2048x3x3xf32>) : !torch.vtensor<[256,2048,3,3],f32>
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x2048x3x3xf32>) : !torch.vtensor<[256,2048,3,3],f32>
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x2048x1x1xf32>) : !torch.vtensor<[256,2048,1,1],f32>
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1280x1x1xf32>) : !torch.vtensor<[256,1280,1,1],f32>
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21x256x1x1xf32>) : !torch.vtensor<[21,256,1,1],f32>
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21xf32>) : !torch.vtensor<[21],f32>
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x3x3xf32>) : !torch.vtensor<[256,1024,3,3],f32>
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21x256x1x1xf32>) : !torch.vtensor<[21,256,1,1],f32>
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21xf32>) : !torch.vtensor<[21],f32>
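// Annotation: the rest of the function repeats one QDQ (quantize-dequantize)
// idiom per tensor: aten.item unpacks the scale and zero point from scalar
// literals, quantize_per_tensor quantizes (dtype code 12 = torch.qint8),
// int_repr exposes the raw si8 payload, and _make_per_tensor_quantized_tensor
// plus dequantize.self rebuild the f32 value. First the input %arg0
// (scale 0.0625, zero point 0):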
%none = torch.constant.none
%126 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%127 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12 = torch.constant.int 12
%128 = torch.aten.item %126 : !torch.vtensor<[],f32> -> !torch.float
%129 = torch.aten.item %127 : !torch.vtensor<[],si8> -> !torch.int
%130 = torch.aten.quantize_per_tensor %arg0, %128, %129, %int12 : !torch.vtensor<[1,3,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%131 = torch.aten.int_repr %130 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],si8>
%132 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%133 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%134 = torch.aten.item %132 : !torch.vtensor<[],f32> -> !torch.float
%135 = torch.aten.item %133 : !torch.vtensor<[],si8> -> !torch.int
%136 = torch.aten._make_per_tensor_quantized_tensor %131, %134, %135 : !torch.vtensor<[1,3,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%137 = torch.aten.dequantize.self %136 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],f32>
%138 = torch.vtensor.literal(dense<224> : tensor<si64>) : !torch.vtensor<[],si64>
%139 = torch.vtensor.literal(dense<224> : tensor<si64>) : !torch.vtensor<[],si64>
%140 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%141 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_0 = torch.constant.int 12
%142 = torch.aten.item %140 : !torch.vtensor<[],f32> -> !torch.float
%143 = torch.aten.item %141 : !torch.vtensor<[],si8> -> !torch.int
%144 = torch.aten.quantize_per_tensor %0, %142, %143, %int12_0 : !torch.vtensor<[64,3,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,3,7,7],!torch.qint8>
%145 = torch.aten.int_repr %144 : !torch.vtensor<[64,3,7,7],!torch.qint8> -> !torch.vtensor<[64,3,7,7],si8>
%146 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%147 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%148 = torch.aten.item %146 : !torch.vtensor<[],f32> -> !torch.float
%149 = torch.aten.item %147 : !torch.vtensor<[],si8> -> !torch.int
%150 = torch.aten._make_per_tensor_quantized_tensor %145, %148, %149 : !torch.vtensor<[64,3,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,3,7,7],!torch.qint8>
%151 = torch.aten.dequantize.self %150 : !torch.vtensor<[64,3,7,7],!torch.qint8> -> !torch.vtensor<[64,3,7,7],f32>
%152 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%153 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1 = torch.constant.int 12
%154 = torch.aten.item %152 : !torch.vtensor<[],f32> -> !torch.float
%155 = torch.aten.item %153 : !torch.vtensor<[],si8> -> !torch.int
%156 = torch.aten.quantize_per_tensor %1, %154, %155, %int12_1 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%157 = torch.aten.int_repr %156 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%158 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%159 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%160 = torch.aten.item %158 : !torch.vtensor<[],f32> -> !torch.float
%161 = torch.aten.item %159 : !torch.vtensor<[],si8> -> !torch.int
%162 = torch.aten._make_per_tensor_quantized_tensor %157, %160, %161 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%163 = torch.aten.dequantize.self %162 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int3 = torch.constant.int 3
%int3_2 = torch.constant.int 3
%int1 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int2 = torch.constant.int 2
%int2_4 = torch.constant.int 2
%int0 = torch.constant.int 0
%164 = torch.prim.ListConstruct %int3, %int3_2 : (!torch.int, !torch.int) -> !torch.list<int>
%165 = torch.prim.ListConstruct %int1, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%166 = torch.prim.ListConstruct %int2, %int2_4 : (!torch.int, !torch.int) -> !torch.list<int>
%167 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_5 = torch.constant.int 1
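// conv1: 7x7 kernel, stride 2, padding 3 -> [1,64,112,112], followed by ReLU.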
%168 = torch.aten.convolution %137, %151, %163, %166, %164, %165, %false, %167, %int1_5 : !torch.vtensor<[1,3,224,224],f32>, !torch.vtensor<[64,3,7,7],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,112,112],f32>
%169 = torch.aten.relu %168 : !torch.vtensor<[1,64,112,112],f32> -> !torch.vtensor<[1,64,112,112],f32>
%170 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%171 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_6 = torch.constant.int 12
%172 = torch.aten.item %170 : !torch.vtensor<[],f32> -> !torch.float
%173 = torch.aten.item %171 : !torch.vtensor<[],si8> -> !torch.int
%174 = torch.aten.quantize_per_tensor %169, %172, %173, %int12_6 : !torch.vtensor<[1,64,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%175 = torch.aten.int_repr %174 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],si8>
%176 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%177 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%178 = torch.aten.item %176 : !torch.vtensor<[],f32> -> !torch.float
%179 = torch.aten.item %177 : !torch.vtensor<[],si8> -> !torch.int
%180 = torch.aten._make_per_tensor_quantized_tensor %175, %178, %179 : !torch.vtensor<[1,64,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%181 = torch.aten.dequantize.self %180 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],f32>
%int3_7 = torch.constant.int 3
%int3_8 = torch.constant.int 3
%182 = torch.prim.ListConstruct %int3_7, %int3_8 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_9 = torch.constant.int 1
%int1_10 = torch.constant.int 1
%183 = torch.prim.ListConstruct %int1_9, %int1_10 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_11 = torch.constant.int 2
%int2_12 = torch.constant.int 2
%184 = torch.prim.ListConstruct %int2_11, %int2_12 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_13 = torch.constant.int 1
%int1_14 = torch.constant.int 1
%185 = torch.prim.ListConstruct %int1_13, %int1_14 : (!torch.int, !torch.int) -> !torch.list<int>
%false_15 = torch.constant.bool false
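// maxpool: 3x3 kernel, stride 2, padding 1 -> [1,64,56,56].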
%186 = torch.aten.max_pool2d %181, %182, %184, %183, %185, %false_15 : !torch.vtensor<[1,64,112,112],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,56,56],f32>
%187 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%188 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_16 = torch.constant.int 12
%189 = torch.aten.item %187 : !torch.vtensor<[],f32> -> !torch.float
%190 = torch.aten.item %188 : !torch.vtensor<[],si8> -> !torch.int
%191 = torch.aten.quantize_per_tensor %186, %189, %190, %int12_16 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%192 = torch.aten.int_repr %191 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%193 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%195 = torch.aten.item %193 : !torch.vtensor<[],f32> -> !torch.float
%196 = torch.aten.item %194 : !torch.vtensor<[],si8> -> !torch.int
%197 = torch.aten._make_per_tensor_quantized_tensor %192, %195, %196 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%198 = torch.aten.dequantize.self %197 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
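// First bottleneck block (layer1.0 in torchvision naming, if this is indeed
// ResNet-50): 1x1 reduce to 64, 3x3 at 64, 1x1 expand to 256, each conv's
// weight and bias going through the same QDQ idiom.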
%199 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_17 = torch.constant.int 12
%201 = torch.aten.item %199 : !torch.vtensor<[],f32> -> !torch.float
%202 = torch.aten.item %200 : !torch.vtensor<[],si8> -> !torch.int
%203 = torch.aten.quantize_per_tensor %2, %201, %202, %int12_17 : !torch.vtensor<[64,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,1,1],!torch.qint8>
%204 = torch.aten.int_repr %203 : !torch.vtensor<[64,64,1,1],!torch.qint8> -> !torch.vtensor<[64,64,1,1],si8>
%205 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%207 = torch.aten.item %205 : !torch.vtensor<[],f32> -> !torch.float
%208 = torch.aten.item %206 : !torch.vtensor<[],si8> -> !torch.int
%209 = torch.aten._make_per_tensor_quantized_tensor %204, %207, %208 : !torch.vtensor<[64,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,1,1],!torch.qint8>
%210 = torch.aten.dequantize.self %209 : !torch.vtensor<[64,64,1,1],!torch.qint8> -> !torch.vtensor<[64,64,1,1],f32>
%211 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_18 = torch.constant.int 12
%213 = torch.aten.item %211 : !torch.vtensor<[],f32> -> !torch.float
%214 = torch.aten.item %212 : !torch.vtensor<[],si8> -> !torch.int
%215 = torch.aten.quantize_per_tensor %3, %213, %214, %int12_18 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%216 = torch.aten.int_repr %215 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%217 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%218 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%219 = torch.aten.item %217 : !torch.vtensor<[],f32> -> !torch.float
%220 = torch.aten.item %218 : !torch.vtensor<[],si8> -> !torch.int
%221 = torch.aten._make_per_tensor_quantized_tensor %216, %219, %220 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%222 = torch.aten.dequantize.self %221 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_19 = torch.constant.int 0
%int0_20 = torch.constant.int 0
%int1_21 = torch.constant.int 1
%int1_22 = torch.constant.int 1
%int1_23 = torch.constant.int 1
%int1_24 = torch.constant.int 1
%int0_25 = torch.constant.int 0
%223 = torch.prim.ListConstruct %int0_19, %int0_20 : (!torch.int, !torch.int) -> !torch.list<int>
%224 = torch.prim.ListConstruct %int1_21, %int1_22 : (!torch.int, !torch.int) -> !torch.list<int>
%225 = torch.prim.ListConstruct %int1_23, %int1_24 : (!torch.int, !torch.int) -> !torch.list<int>
%226 = torch.prim.ListConstruct %int0_25, %int0_25 : (!torch.int, !torch.int) -> !torch.list<int>
%false_26 = torch.constant.bool false
%int1_27 = torch.constant.int 1
%227 = torch.aten.convolution %198, %210, %222, %225, %223, %224, %false_26, %226, %int1_27 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%228 = torch.aten.relu %227 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%229 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%230 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_28 = torch.constant.int 12
%231 = torch.aten.item %229 : !torch.vtensor<[],f32> -> !torch.float
%232 = torch.aten.item %230 : !torch.vtensor<[],si8> -> !torch.int
%233 = torch.aten.quantize_per_tensor %228, %231, %232, %int12_28 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%234 = torch.aten.int_repr %233 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%235 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%236 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%237 = torch.aten.item %235 : !torch.vtensor<[],f32> -> !torch.float
%238 = torch.aten.item %236 : !torch.vtensor<[],si8> -> !torch.int
%239 = torch.aten._make_per_tensor_quantized_tensor %234, %237, %238 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%240 = torch.aten.dequantize.self %239 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%241 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_29 = torch.constant.int 12
%243 = torch.aten.item %241 : !torch.vtensor<[],f32> -> !torch.float
%244 = torch.aten.item %242 : !torch.vtensor<[],si8> -> !torch.int
%245 = torch.aten.quantize_per_tensor %4, %243, %244, %int12_29 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%246 = torch.aten.int_repr %245 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%247 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%248 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%249 = torch.aten.item %247 : !torch.vtensor<[],f32> -> !torch.float
%250 = torch.aten.item %248 : !torch.vtensor<[],si8> -> !torch.int
%251 = torch.aten._make_per_tensor_quantized_tensor %246, %249, %250 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%252 = torch.aten.dequantize.self %251 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%253 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%254 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_30 = torch.constant.int 12
%255 = torch.aten.item %253 : !torch.vtensor<[],f32> -> !torch.float
%256 = torch.aten.item %254 : !torch.vtensor<[],si8> -> !torch.int
%257 = torch.aten.quantize_per_tensor %5, %255, %256, %int12_30 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%258 = torch.aten.int_repr %257 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%259 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%260 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%261 = torch.aten.item %259 : !torch.vtensor<[],f32> -> !torch.float
%262 = torch.aten.item %260 : !torch.vtensor<[],si8> -> !torch.int
%263 = torch.aten._make_per_tensor_quantized_tensor %258, %261, %262 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%264 = torch.aten.dequantize.self %263 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_31 = torch.constant.int 1
%int1_32 = torch.constant.int 1
%int1_33 = torch.constant.int 1
%int1_34 = torch.constant.int 1
%int1_35 = torch.constant.int 1
%int1_36 = torch.constant.int 1
%int0_37 = torch.constant.int 0
%265 = torch.prim.ListConstruct %int1_31, %int1_32 : (!torch.int, !torch.int) -> !torch.list<int>
%266 = torch.prim.ListConstruct %int1_33, %int1_34 : (!torch.int, !torch.int) -> !torch.list<int>
%267 = torch.prim.ListConstruct %int1_35, %int1_36 : (!torch.int, !torch.int) -> !torch.list<int>
%268 = torch.prim.ListConstruct %int0_37, %int0_37 : (!torch.int, !torch.int) -> !torch.list<int>
%false_38 = torch.constant.bool false
%int1_39 = torch.constant.int 1
%269 = torch.aten.convolution %240, %252, %264, %267, %265, %266, %false_38, %268, %int1_39 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%270 = torch.aten.relu %269 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%271 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%272 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_40 = torch.constant.int 12
%273 = torch.aten.item %271 : !torch.vtensor<[],f32> -> !torch.float
%274 = torch.aten.item %272 : !torch.vtensor<[],si8> -> !torch.int
%275 = torch.aten.quantize_per_tensor %270, %273, %274, %int12_40 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%276 = torch.aten.int_repr %275 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%277 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%279 = torch.aten.item %277 : !torch.vtensor<[],f32> -> !torch.float
%280 = torch.aten.item %278 : !torch.vtensor<[],si8> -> !torch.int
%281 = torch.aten._make_per_tensor_quantized_tensor %276, %279, %280 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%282 = torch.aten.dequantize.self %281 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%283 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_41 = torch.constant.int 12
%285 = torch.aten.item %283 : !torch.vtensor<[],f32> -> !torch.float
%286 = torch.aten.item %284 : !torch.vtensor<[],si8> -> !torch.int
%287 = torch.aten.quantize_per_tensor %6, %285, %286, %int12_41 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%288 = torch.aten.int_repr %287 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%289 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%290 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%291 = torch.aten.item %289 : !torch.vtensor<[],f32> -> !torch.float
%292 = torch.aten.item %290 : !torch.vtensor<[],si8> -> !torch.int
%293 = torch.aten._make_per_tensor_quantized_tensor %288, %291, %292 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%294 = torch.aten.dequantize.self %293 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%295 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%296 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_42 = torch.constant.int 12
%297 = torch.aten.item %295 : !torch.vtensor<[],f32> -> !torch.float
%298 = torch.aten.item %296 : !torch.vtensor<[],si8> -> !torch.int
%299 = torch.aten.quantize_per_tensor %7, %297, %298, %int12_42 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%300 = torch.aten.int_repr %299 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%301 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%302 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%303 = torch.aten.item %301 : !torch.vtensor<[],f32> -> !torch.float
%304 = torch.aten.item %302 : !torch.vtensor<[],si8> -> !torch.int
%305 = torch.aten._make_per_tensor_quantized_tensor %300, %303, %304 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%306 = torch.aten.dequantize.self %305 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_43 = torch.constant.int 0
%int0_44 = torch.constant.int 0
%int1_45 = torch.constant.int 1
%int1_46 = torch.constant.int 1
%int1_47 = torch.constant.int 1
%int1_48 = torch.constant.int 1
%int0_49 = torch.constant.int 0
%307 = torch.prim.ListConstruct %int0_43, %int0_44 : (!torch.int, !torch.int) -> !torch.list<int>
%308 = torch.prim.ListConstruct %int1_45, %int1_46 : (!torch.int, !torch.int) -> !torch.list<int>
%309 = torch.prim.ListConstruct %int1_47, %int1_48 : (!torch.int, !torch.int) -> !torch.list<int>
%310 = torch.prim.ListConstruct %int0_49, %int0_49 : (!torch.int, !torch.int) -> !torch.list<int>
%false_50 = torch.constant.bool false
%int1_51 = torch.constant.int 1
%311 = torch.aten.convolution %282, %294, %306, %309, %307, %308, %false_50, %310, %int1_51 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%312 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%313 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_52 = torch.constant.int 12
%314 = torch.aten.item %312 : !torch.vtensor<[],f32> -> !torch.float
%315 = torch.aten.item %313 : !torch.vtensor<[],si8> -> !torch.int
%316 = torch.aten.quantize_per_tensor %311, %314, %315, %int12_52 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%317 = torch.aten.int_repr %316 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%318 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%319 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%320 = torch.aten.item %318 : !torch.vtensor<[],f32> -> !torch.float
%321 = torch.aten.item %319 : !torch.vtensor<[],si8> -> !torch.int
%322 = torch.aten._make_per_tensor_quantized_tensor %317, %320, %321 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%323 = torch.aten.dequantize.self %322 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
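// Downsample branch of the first block: QDQ of weight %8 and bias %9, then a
// 1x1 conv projecting the block input %198 to 256 channels for the residual.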
%324 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%325 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_53 = torch.constant.int 12
%326 = torch.aten.item %324 : !torch.vtensor<[],f32> -> !torch.float
%327 = torch.aten.item %325 : !torch.vtensor<[],si8> -> !torch.int
%328 = torch.aten.quantize_per_tensor %8, %326, %327, %int12_53 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%329 = torch.aten.int_repr %328 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%330 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%331 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%332 = torch.aten.item %330 : !torch.vtensor<[],f32> -> !torch.float
%333 = torch.aten.item %331 : !torch.vtensor<[],si8> -> !torch.int
%334 = torch.aten._make_per_tensor_quantized_tensor %329, %332, %333 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%335 = torch.aten.dequantize.self %334 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%336 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%337 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_54 = torch.constant.int 12
%338 = torch.aten.item %336 : !torch.vtensor<[],f32> -> !torch.float
%339 = torch.aten.item %337 : !torch.vtensor<[],si8> -> !torch.int
%340 = torch.aten.quantize_per_tensor %9, %338, %339, %int12_54 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%341 = torch.aten.int_repr %340 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%342 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%343 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%344 = torch.aten.item %342 : !torch.vtensor<[],f32> -> !torch.float
%345 = torch.aten.item %343 : !torch.vtensor<[],si8> -> !torch.int
%346 = torch.aten._make_per_tensor_quantized_tensor %341, %344, %345 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%347 = torch.aten.dequantize.self %346 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_55 = torch.constant.int 0
%int0_56 = torch.constant.int 0
%int1_57 = torch.constant.int 1
%int1_58 = torch.constant.int 1
%int1_59 = torch.constant.int 1
%int1_60 = torch.constant.int 1
%int0_61 = torch.constant.int 0
%348 = torch.prim.ListConstruct %int0_55, %int0_56 : (!torch.int, !torch.int) -> !torch.list<int>
%349 = torch.prim.ListConstruct %int1_57, %int1_58 : (!torch.int, !torch.int) -> !torch.list<int>
%350 = torch.prim.ListConstruct %int1_59, %int1_60 : (!torch.int, !torch.int) -> !torch.list<int>
%351 = torch.prim.ListConstruct %int0_61, %int0_61 : (!torch.int, !torch.int) -> !torch.list<int>
%false_62 = torch.constant.bool false
%int1_63 = torch.constant.int 1
%352 = torch.aten.convolution %198, %335, %347, %350, %348, %349, %false_62, %351, %int1_63 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%353 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_64 = torch.constant.int 12
%355 = torch.aten.item %353 : !torch.vtensor<[],f32> -> !torch.float
%356 = torch.aten.item %354 : !torch.vtensor<[],si8> -> !torch.int
%357 = torch.aten.quantize_per_tensor %352, %355, %356, %int12_64 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%358 = torch.aten.int_repr %357 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%359 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%361 = torch.aten.item %359 : !torch.vtensor<[],f32> -> !torch.float
%362 = torch.aten.item %360 : !torch.vtensor<[],si8> -> !torch.int
%363 = torch.aten._make_per_tensor_quantized_tensor %358, %361, %362 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%364 = torch.aten.dequantize.self %363 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
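// Residual add (main path %323 + projected shortcut %364) and ReLU, then the
// block output is re-quantized.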
%int1_65 = torch.constant.int 1
%365 = torch.aten.add.Tensor %323, %364, %int1_65 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%366 = torch.aten.relu %365 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%367 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%368 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_66 = torch.constant.int 12
%369 = torch.aten.item %367 : !torch.vtensor<[],f32> -> !torch.float
%370 = torch.aten.item %368 : !torch.vtensor<[],si8> -> !torch.int
%371 = torch.aten.quantize_per_tensor %366, %369, %370, %int12_66 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%372 = torch.aten.int_repr %371 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%373 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%374 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%375 = torch.aten.item %373 : !torch.vtensor<[],f32> -> !torch.float
%376 = torch.aten.item %374 : !torch.vtensor<[],si8> -> !torch.int
%377 = torch.aten._make_per_tensor_quantized_tensor %372, %375, %376 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%378 = torch.aten.dequantize.self %377 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
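// Second bottleneck block: identity shortcut (no downsample conv), same
// 1x1 / 3x3 / 1x1 pattern.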
%379 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%380 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_67 = torch.constant.int 12
%381 = torch.aten.item %379 : !torch.vtensor<[],f32> -> !torch.float
%382 = torch.aten.item %380 : !torch.vtensor<[],si8> -> !torch.int
%383 = torch.aten.quantize_per_tensor %10, %381, %382, %int12_67 : !torch.vtensor<[64,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%384 = torch.aten.int_repr %383 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],si8>
%385 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%386 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%387 = torch.aten.item %385 : !torch.vtensor<[],f32> -> !torch.float
%388 = torch.aten.item %386 : !torch.vtensor<[],si8> -> !torch.int
%389 = torch.aten._make_per_tensor_quantized_tensor %384, %387, %388 : !torch.vtensor<[64,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%390 = torch.aten.dequantize.self %389 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],f32>
%391 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%392 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_68 = torch.constant.int 12
%393 = torch.aten.item %391 : !torch.vtensor<[],f32> -> !torch.float
%394 = torch.aten.item %392 : !torch.vtensor<[],si8> -> !torch.int
%395 = torch.aten.quantize_per_tensor %11, %393, %394, %int12_68 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%396 = torch.aten.int_repr %395 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%397 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%398 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%399 = torch.aten.item %397 : !torch.vtensor<[],f32> -> !torch.float
%400 = torch.aten.item %398 : !torch.vtensor<[],si8> -> !torch.int
%401 = torch.aten._make_per_tensor_quantized_tensor %396, %399, %400 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%402 = torch.aten.dequantize.self %401 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_69 = torch.constant.int 0
%int0_70 = torch.constant.int 0
%int1_71 = torch.constant.int 1
%int1_72 = torch.constant.int 1
%int1_73 = torch.constant.int 1
%int1_74 = torch.constant.int 1
%int0_75 = torch.constant.int 0
%403 = torch.prim.ListConstruct %int0_69, %int0_70 : (!torch.int, !torch.int) -> !torch.list<int>
%404 = torch.prim.ListConstruct %int1_71, %int1_72 : (!torch.int, !torch.int) -> !torch.list<int>
%405 = torch.prim.ListConstruct %int1_73, %int1_74 : (!torch.int, !torch.int) -> !torch.list<int>
%406 = torch.prim.ListConstruct %int0_75, %int0_75 : (!torch.int, !torch.int) -> !torch.list<int>
%false_76 = torch.constant.bool false
%int1_77 = torch.constant.int 1
%407 = torch.aten.convolution %378, %390, %402, %405, %403, %404, %false_76, %406, %int1_77 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[64,256,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%408 = torch.aten.relu %407 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%409 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%410 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_78 = torch.constant.int 12
%411 = torch.aten.item %409 : !torch.vtensor<[],f32> -> !torch.float
%412 = torch.aten.item %410 : !torch.vtensor<[],si8> -> !torch.int
%413 = torch.aten.quantize_per_tensor %408, %411, %412, %int12_78 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%414 = torch.aten.int_repr %413 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%415 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%416 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%417 = torch.aten.item %415 : !torch.vtensor<[],f32> -> !torch.float
%418 = torch.aten.item %416 : !torch.vtensor<[],si8> -> !torch.int
%419 = torch.aten._make_per_tensor_quantized_tensor %414, %417, %418 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%420 = torch.aten.dequantize.self %419 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%421 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%422 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_79 = torch.constant.int 12
%423 = torch.aten.item %421 : !torch.vtensor<[],f32> -> !torch.float
%424 = torch.aten.item %422 : !torch.vtensor<[],si8> -> !torch.int
%425 = torch.aten.quantize_per_tensor %12, %423, %424, %int12_79 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%426 = torch.aten.int_repr %425 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%427 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%428 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%429 = torch.aten.item %427 : !torch.vtensor<[],f32> -> !torch.float
%430 = torch.aten.item %428 : !torch.vtensor<[],si8> -> !torch.int
%431 = torch.aten._make_per_tensor_quantized_tensor %426, %429, %430 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%432 = torch.aten.dequantize.self %431 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%433 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%434 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_80 = torch.constant.int 12
%435 = torch.aten.item %433 : !torch.vtensor<[],f32> -> !torch.float
%436 = torch.aten.item %434 : !torch.vtensor<[],si8> -> !torch.int
%437 = torch.aten.quantize_per_tensor %13, %435, %436, %int12_80 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%438 = torch.aten.int_repr %437 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%439 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%440 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%441 = torch.aten.item %439 : !torch.vtensor<[],f32> -> !torch.float
%442 = torch.aten.item %440 : !torch.vtensor<[],si8> -> !torch.int
%443 = torch.aten._make_per_tensor_quantized_tensor %438, %441, %442 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%444 = torch.aten.dequantize.self %443 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_81 = torch.constant.int 1
%int1_82 = torch.constant.int 1
%int1_83 = torch.constant.int 1
%int1_84 = torch.constant.int 1
%int1_85 = torch.constant.int 1
%int1_86 = torch.constant.int 1
%int0_87 = torch.constant.int 0
%445 = torch.prim.ListConstruct %int1_81, %int1_82 : (!torch.int, !torch.int) -> !torch.list<int>
%446 = torch.prim.ListConstruct %int1_83, %int1_84 : (!torch.int, !torch.int) -> !torch.list<int>
%447 = torch.prim.ListConstruct %int1_85, %int1_86 : (!torch.int, !torch.int) -> !torch.list<int>
%448 = torch.prim.ListConstruct %int0_87, %int0_87 : (!torch.int, !torch.int) -> !torch.list<int>
%false_88 = torch.constant.bool false
%int1_89 = torch.constant.int 1
%449 = torch.aten.convolution %420, %432, %444, %447, %445, %446, %false_88, %448, %int1_89 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%450 = torch.aten.relu %449 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%451 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%452 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_90 = torch.constant.int 12
%453 = torch.aten.item %451 : !torch.vtensor<[],f32> -> !torch.float
%454 = torch.aten.item %452 : !torch.vtensor<[],si8> -> !torch.int
%455 = torch.aten.quantize_per_tensor %450, %453, %454, %int12_90 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%456 = torch.aten.int_repr %455 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%457 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%458 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%459 = torch.aten.item %457 : !torch.vtensor<[],f32> -> !torch.float
%460 = torch.aten.item %458 : !torch.vtensor<[],si8> -> !torch.int
%461 = torch.aten._make_per_tensor_quantized_tensor %456, %459, %460 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%462 = torch.aten.dequantize.self %461 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%463 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%464 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_91 = torch.constant.int 12
%465 = torch.aten.item %463 : !torch.vtensor<[],f32> -> !torch.float
%466 = torch.aten.item %464 : !torch.vtensor<[],si8> -> !torch.int
%467 = torch.aten.quantize_per_tensor %14, %465, %466, %int12_91 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%468 = torch.aten.int_repr %467 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%469 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%470 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%471 = torch.aten.item %469 : !torch.vtensor<[],f32> -> !torch.float
%472 = torch.aten.item %470 : !torch.vtensor<[],si8> -> !torch.int
%473 = torch.aten._make_per_tensor_quantized_tensor %468, %471, %472 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%474 = torch.aten.dequantize.self %473 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%475 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%476 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_92 = torch.constant.int 12
%477 = torch.aten.item %475 : !torch.vtensor<[],f32> -> !torch.float
%478 = torch.aten.item %476 : !torch.vtensor<[],si8> -> !torch.int
%479 = torch.aten.quantize_per_tensor %15, %477, %478, %int12_92 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%480 = torch.aten.int_repr %479 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%481 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%482 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%483 = torch.aten.item %481 : !torch.vtensor<[],f32> -> !torch.float
%484 = torch.aten.item %482 : !torch.vtensor<[],si8> -> !torch.int
%485 = torch.aten._make_per_tensor_quantized_tensor %480, %483, %484 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%486 = torch.aten.dequantize.self %485 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_93 = torch.constant.int 0
%int0_94 = torch.constant.int 0
%int1_95 = torch.constant.int 1
%int1_96 = torch.constant.int 1
%int1_97 = torch.constant.int 1
%int1_98 = torch.constant.int 1
%int0_99 = torch.constant.int 0
%487 = torch.prim.ListConstruct %int0_93, %int0_94 : (!torch.int, !torch.int) -> !torch.list<int>
%488 = torch.prim.ListConstruct %int1_95, %int1_96 : (!torch.int, !torch.int) -> !torch.list<int>
%489 = torch.prim.ListConstruct %int1_97, %int1_98 : (!torch.int, !torch.int) -> !torch.list<int>
%490 = torch.prim.ListConstruct %int0_99, %int0_99 : (!torch.int, !torch.int) -> !torch.list<int>
%false_100 = torch.constant.bool false
%int1_101 = torch.constant.int 1
%491 = torch.aten.convolution %462, %474, %486, %489, %487, %488, %false_100, %490, %int1_101 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%492 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_102 = torch.constant.int 12
%494 = torch.aten.item %492 : !torch.vtensor<[],f32> -> !torch.float
%495 = torch.aten.item %493 : !torch.vtensor<[],si8> -> !torch.int
%496 = torch.aten.quantize_per_tensor %491, %494, %495, %int12_102 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%497 = torch.aten.int_repr %496 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%498 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%500 = torch.aten.item %498 : !torch.vtensor<[],f32> -> !torch.float
%501 = torch.aten.item %499 : !torch.vtensor<[],si8> -> !torch.int
%502 = torch.aten._make_per_tensor_quantized_tensor %497, %500, %501 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%503 = torch.aten.dequantize.self %502 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
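    // Residual add with the shortcut %378, then ReLU: end of this bottleneck block.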
%int1_103 = torch.constant.int 1
%504 = torch.aten.add.Tensor %503, %378, %int1_103 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%505 = torch.aten.relu %504 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%506 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%507 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_104 = torch.constant.int 12
%508 = torch.aten.item %506 : !torch.vtensor<[],f32> -> !torch.float
%509 = torch.aten.item %507 : !torch.vtensor<[],si8> -> !torch.int
%510 = torch.aten.quantize_per_tensor %505, %508, %509, %int12_104 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%511 = torch.aten.int_repr %510 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%512 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%514 = torch.aten.item %512 : !torch.vtensor<[],f32> -> !torch.float
%515 = torch.aten.item %513 : !torch.vtensor<[],si8> -> !torch.int
%516 = torch.aten._make_per_tensor_quantized_tensor %511, %514, %515 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%517 = torch.aten.dequantize.self %516 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
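    // Next bottleneck block: QDQ of the 1x1 reduce weight %16 ([64,256,1,1], scale 1/256) and bias %17 (scale 1/128).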
%518 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_105 = torch.constant.int 12
%520 = torch.aten.item %518 : !torch.vtensor<[],f32> -> !torch.float
%521 = torch.aten.item %519 : !torch.vtensor<[],si8> -> !torch.int
%522 = torch.aten.quantize_per_tensor %16, %520, %521, %int12_105 : !torch.vtensor<[64,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%523 = torch.aten.int_repr %522 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],si8>
%524 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%525 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%526 = torch.aten.item %524 : !torch.vtensor<[],f32> -> !torch.float
%527 = torch.aten.item %525 : !torch.vtensor<[],si8> -> !torch.int
%528 = torch.aten._make_per_tensor_quantized_tensor %523, %526, %527 : !torch.vtensor<[64,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%529 = torch.aten.dequantize.self %528 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],f32>
%530 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%531 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_106 = torch.constant.int 12
%532 = torch.aten.item %530 : !torch.vtensor<[],f32> -> !torch.float
%533 = torch.aten.item %531 : !torch.vtensor<[],si8> -> !torch.int
%534 = torch.aten.quantize_per_tensor %17, %532, %533, %int12_106 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%535 = torch.aten.int_repr %534 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%536 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%537 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%538 = torch.aten.item %536 : !torch.vtensor<[],f32> -> !torch.float
%539 = torch.aten.item %537 : !torch.vtensor<[],si8> -> !torch.int
%540 = torch.aten._make_per_tensor_quantized_tensor %535, %538, %539 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%541 = torch.aten.dequantize.self %540 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
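    // 1x1 reduce convolution: [1,256,56,56] -> [1,64,56,56], followed by ReLU.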
%int0_107 = torch.constant.int 0
%int0_108 = torch.constant.int 0
%int1_109 = torch.constant.int 1
%int1_110 = torch.constant.int 1
%int1_111 = torch.constant.int 1
%int1_112 = torch.constant.int 1
%int0_113 = torch.constant.int 0
%542 = torch.prim.ListConstruct %int0_107, %int0_108 : (!torch.int, !torch.int) -> !torch.list<int>
%543 = torch.prim.ListConstruct %int1_109, %int1_110 : (!torch.int, !torch.int) -> !torch.list<int>
%544 = torch.prim.ListConstruct %int1_111, %int1_112 : (!torch.int, !torch.int) -> !torch.list<int>
%545 = torch.prim.ListConstruct %int0_113, %int0_113 : (!torch.int, !torch.int) -> !torch.list<int>
%false_114 = torch.constant.bool false
%int1_115 = torch.constant.int 1
%546 = torch.aten.convolution %517, %529, %541, %544, %542, %543, %false_114, %545, %int1_115 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[64,256,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%547 = torch.aten.relu %546 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%548 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%549 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_116 = torch.constant.int 12
%550 = torch.aten.item %548 : !torch.vtensor<[],f32> -> !torch.float
%551 = torch.aten.item %549 : !torch.vtensor<[],si8> -> !torch.int
%552 = torch.aten.quantize_per_tensor %547, %550, %551, %int12_116 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%553 = torch.aten.int_repr %552 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%554 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%555 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%556 = torch.aten.item %554 : !torch.vtensor<[],f32> -> !torch.float
%557 = torch.aten.item %555 : !torch.vtensor<[],si8> -> !torch.int
%558 = torch.aten._make_per_tensor_quantized_tensor %553, %556, %557 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%559 = torch.aten.dequantize.self %558 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
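    // QDQ of the 3x3 weight %18 ([64,64,3,3], scale 1/256) and bias %19 (scale 1/128).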
%560 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%561 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_117 = torch.constant.int 12
%562 = torch.aten.item %560 : !torch.vtensor<[],f32> -> !torch.float
%563 = torch.aten.item %561 : !torch.vtensor<[],si8> -> !torch.int
%564 = torch.aten.quantize_per_tensor %18, %562, %563, %int12_117 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%565 = torch.aten.int_repr %564 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%566 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%567 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%568 = torch.aten.item %566 : !torch.vtensor<[],f32> -> !torch.float
%569 = torch.aten.item %567 : !torch.vtensor<[],si8> -> !torch.int
%570 = torch.aten._make_per_tensor_quantized_tensor %565, %568, %569 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%571 = torch.aten.dequantize.self %570 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%572 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%573 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_118 = torch.constant.int 12
%574 = torch.aten.item %572 : !torch.vtensor<[],f32> -> !torch.float
%575 = torch.aten.item %573 : !torch.vtensor<[],si8> -> !torch.int
%576 = torch.aten.quantize_per_tensor %19, %574, %575, %int12_118 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%577 = torch.aten.int_repr %576 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%578 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%579 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%580 = torch.aten.item %578 : !torch.vtensor<[],f32> -> !torch.float
%581 = torch.aten.item %579 : !torch.vtensor<[],si8> -> !torch.int
%582 = torch.aten._make_per_tensor_quantized_tensor %577, %580, %581 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%583 = torch.aten.dequantize.self %582 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
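    // 3x3 convolution (stride 1, padding 1): [1,64,56,56] -> [1,64,56,56], followed by ReLU.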
%int1_119 = torch.constant.int 1
%int1_120 = torch.constant.int 1
%int1_121 = torch.constant.int 1
%int1_122 = torch.constant.int 1
%int1_123 = torch.constant.int 1
%int1_124 = torch.constant.int 1
%int0_125 = torch.constant.int 0
%584 = torch.prim.ListConstruct %int1_119, %int1_120 : (!torch.int, !torch.int) -> !torch.list<int>
%585 = torch.prim.ListConstruct %int1_121, %int1_122 : (!torch.int, !torch.int) -> !torch.list<int>
%586 = torch.prim.ListConstruct %int1_123, %int1_124 : (!torch.int, !torch.int) -> !torch.list<int>
%587 = torch.prim.ListConstruct %int0_125, %int0_125 : (!torch.int, !torch.int) -> !torch.list<int>
%false_126 = torch.constant.bool false
%int1_127 = torch.constant.int 1
%588 = torch.aten.convolution %559, %571, %583, %586, %584, %585, %false_126, %587, %int1_127 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%589 = torch.aten.relu %588 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%590 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%591 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_128 = torch.constant.int 12
%592 = torch.aten.item %590 : !torch.vtensor<[],f32> -> !torch.float
%593 = torch.aten.item %591 : !torch.vtensor<[],si8> -> !torch.int
%594 = torch.aten.quantize_per_tensor %589, %592, %593, %int12_128 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%595 = torch.aten.int_repr %594 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%596 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%597 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%598 = torch.aten.item %596 : !torch.vtensor<[],f32> -> !torch.float
%599 = torch.aten.item %597 : !torch.vtensor<[],si8> -> !torch.int
%600 = torch.aten._make_per_tensor_quantized_tensor %595, %598, %599 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%601 = torch.aten.dequantize.self %600 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%602 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%603 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_129 = torch.constant.int 12
%604 = torch.aten.item %602 : !torch.vtensor<[],f32> -> !torch.float
%605 = torch.aten.item %603 : !torch.vtensor<[],si8> -> !torch.int
%606 = torch.aten.quantize_per_tensor %20, %604, %605, %int12_129 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%607 = torch.aten.int_repr %606 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%608 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%609 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%610 = torch.aten.item %608 : !torch.vtensor<[],f32> -> !torch.float
%611 = torch.aten.item %609 : !torch.vtensor<[],si8> -> !torch.int
%612 = torch.aten._make_per_tensor_quantized_tensor %607, %610, %611 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%613 = torch.aten.dequantize.self %612 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%614 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%615 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_130 = torch.constant.int 12
%616 = torch.aten.item %614 : !torch.vtensor<[],f32> -> !torch.float
%617 = torch.aten.item %615 : !torch.vtensor<[],si8> -> !torch.int
%618 = torch.aten.quantize_per_tensor %21, %616, %617, %int12_130 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%619 = torch.aten.int_repr %618 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%620 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%621 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%622 = torch.aten.item %620 : !torch.vtensor<[],f32> -> !torch.float
%623 = torch.aten.item %621 : !torch.vtensor<[],si8> -> !torch.int
%624 = torch.aten._make_per_tensor_quantized_tensor %619, %622, %623 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%625 = torch.aten.dequantize.self %624 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
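    // 1x1 expand convolution: [1,64,56,56] -> [1,256,56,56].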
%int0_131 = torch.constant.int 0
%int0_132 = torch.constant.int 0
%int1_133 = torch.constant.int 1
%int1_134 = torch.constant.int 1
%int1_135 = torch.constant.int 1
%int1_136 = torch.constant.int 1
%int0_137 = torch.constant.int 0
%626 = torch.prim.ListConstruct %int0_131, %int0_132 : (!torch.int, !torch.int) -> !torch.list<int>
%627 = torch.prim.ListConstruct %int1_133, %int1_134 : (!torch.int, !torch.int) -> !torch.list<int>
%628 = torch.prim.ListConstruct %int1_135, %int1_136 : (!torch.int, !torch.int) -> !torch.list<int>
%629 = torch.prim.ListConstruct %int0_137, %int0_137 : (!torch.int, !torch.int) -> !torch.list<int>
%false_138 = torch.constant.bool false
%int1_139 = torch.constant.int 1
%630 = torch.aten.convolution %601, %613, %625, %628, %626, %627, %false_138, %629, %int1_139 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%631 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%632 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_140 = torch.constant.int 12
%633 = torch.aten.item %631 : !torch.vtensor<[],f32> -> !torch.float
%634 = torch.aten.item %632 : !torch.vtensor<[],si8> -> !torch.int
%635 = torch.aten.quantize_per_tensor %630, %633, %634, %int12_140 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%636 = torch.aten.int_repr %635 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%637 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%638 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%639 = torch.aten.item %637 : !torch.vtensor<[],f32> -> !torch.float
%640 = torch.aten.item %638 : !torch.vtensor<[],si8> -> !torch.int
%641 = torch.aten._make_per_tensor_quantized_tensor %636, %639, %640 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%642 = torch.aten.dequantize.self %641 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
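    // Residual add with the block input %517, then ReLU.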
%int1_141 = torch.constant.int 1
%643 = torch.aten.add.Tensor %642, %517, %int1_141 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%644 = torch.aten.relu %643 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%645 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_142 = torch.constant.int 12
%647 = torch.aten.item %645 : !torch.vtensor<[],f32> -> !torch.float
%648 = torch.aten.item %646 : !torch.vtensor<[],si8> -> !torch.int
%649 = torch.aten.quantize_per_tensor %644, %647, %648, %int12_142 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%650 = torch.aten.int_repr %649 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%651 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%653 = torch.aten.item %651 : !torch.vtensor<[],f32> -> !torch.float
%654 = torch.aten.item %652 : !torch.vtensor<[],si8> -> !torch.int
%655 = torch.aten._make_per_tensor_quantized_tensor %650, %653, %654 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%656 = torch.aten.dequantize.self %655 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
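    // New stage: QDQ of the 1x1 reduce weight %22 ([128,256,1,1]); channel width doubles from 64 to 128.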
%657 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_143 = torch.constant.int 12
%659 = torch.aten.item %657 : !torch.vtensor<[],f32> -> !torch.float
%660 = torch.aten.item %658 : !torch.vtensor<[],si8> -> !torch.int
%661 = torch.aten.quantize_per_tensor %22, %659, %660, %int12_143 : !torch.vtensor<[128,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,1,1],!torch.qint8>
%662 = torch.aten.int_repr %661 : !torch.vtensor<[128,256,1,1],!torch.qint8> -> !torch.vtensor<[128,256,1,1],si8>
%663 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%664 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%665 = torch.aten.item %663 : !torch.vtensor<[],f32> -> !torch.float
%666 = torch.aten.item %664 : !torch.vtensor<[],si8> -> !torch.int
%667 = torch.aten._make_per_tensor_quantized_tensor %662, %665, %666 : !torch.vtensor<[128,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,1,1],!torch.qint8>
%668 = torch.aten.dequantize.self %667 : !torch.vtensor<[128,256,1,1],!torch.qint8> -> !torch.vtensor<[128,256,1,1],f32>
%669 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%670 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_144 = torch.constant.int 12
%671 = torch.aten.item %669 : !torch.vtensor<[],f32> -> !torch.float
%672 = torch.aten.item %670 : !torch.vtensor<[],si8> -> !torch.int
%673 = torch.aten.quantize_per_tensor %23, %671, %672, %int12_144 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%674 = torch.aten.int_repr %673 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%675 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%677 = torch.aten.item %675 : !torch.vtensor<[],f32> -> !torch.float
%678 = torch.aten.item %676 : !torch.vtensor<[],si8> -> !torch.int
%679 = torch.aten._make_per_tensor_quantized_tensor %674, %677, %678 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%680 = torch.aten.dequantize.self %679 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_145 = torch.constant.int 0
%int0_146 = torch.constant.int 0
%int1_147 = torch.constant.int 1
%int1_148 = torch.constant.int 1
%int1_149 = torch.constant.int 1
%int1_150 = torch.constant.int 1
%int0_151 = torch.constant.int 0
%681 = torch.prim.ListConstruct %int0_145, %int0_146 : (!torch.int, !torch.int) -> !torch.list<int>
%682 = torch.prim.ListConstruct %int1_147, %int1_148 : (!torch.int, !torch.int) -> !torch.list<int>
%683 = torch.prim.ListConstruct %int1_149, %int1_150 : (!torch.int, !torch.int) -> !torch.list<int>
%684 = torch.prim.ListConstruct %int0_151, %int0_151 : (!torch.int, !torch.int) -> !torch.list<int>
%false_152 = torch.constant.bool false
%int1_153 = torch.constant.int 1
%685 = torch.aten.convolution %656, %668, %680, %683, %681, %682, %false_152, %684, %int1_153 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[128,256,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,56,56],f32>
%686 = torch.aten.relu %685 : !torch.vtensor<[1,128,56,56],f32> -> !torch.vtensor<[1,128,56,56],f32>
%687 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%688 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_154 = torch.constant.int 12
%689 = torch.aten.item %687 : !torch.vtensor<[],f32> -> !torch.float
%690 = torch.aten.item %688 : !torch.vtensor<[],si8> -> !torch.int
%691 = torch.aten.quantize_per_tensor %686, %689, %690, %int12_154 : !torch.vtensor<[1,128,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,56,56],!torch.qint8>
%692 = torch.aten.int_repr %691 : !torch.vtensor<[1,128,56,56],!torch.qint8> -> !torch.vtensor<[1,128,56,56],si8>
%693 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%694 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%695 = torch.aten.item %693 : !torch.vtensor<[],f32> -> !torch.float
%696 = torch.aten.item %694 : !torch.vtensor<[],si8> -> !torch.int
%697 = torch.aten._make_per_tensor_quantized_tensor %692, %695, %696 : !torch.vtensor<[1,128,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,56,56],!torch.qint8>
%698 = torch.aten.dequantize.self %697 : !torch.vtensor<[1,128,56,56],!torch.qint8> -> !torch.vtensor<[1,128,56,56],f32>
%699 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%700 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_155 = torch.constant.int 12
%701 = torch.aten.item %699 : !torch.vtensor<[],f32> -> !torch.float
%702 = torch.aten.item %700 : !torch.vtensor<[],si8> -> !torch.int
%703 = torch.aten.quantize_per_tensor %24, %701, %702, %int12_155 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%704 = torch.aten.int_repr %703 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%705 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%706 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%707 = torch.aten.item %705 : !torch.vtensor<[],f32> -> !torch.float
%708 = torch.aten.item %706 : !torch.vtensor<[],si8> -> !torch.int
%709 = torch.aten._make_per_tensor_quantized_tensor %704, %707, %708 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%710 = torch.aten.dequantize.self %709 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%711 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%712 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_156 = torch.constant.int 12
%713 = torch.aten.item %711 : !torch.vtensor<[],f32> -> !torch.float
%714 = torch.aten.item %712 : !torch.vtensor<[],si8> -> !torch.int
%715 = torch.aten.quantize_per_tensor %25, %713, %714, %int12_156 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%716 = torch.aten.int_repr %715 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%717 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%718 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%719 = torch.aten.item %717 : !torch.vtensor<[],f32> -> !torch.float
%720 = torch.aten.item %718 : !torch.vtensor<[],si8> -> !torch.int
%721 = torch.aten._make_per_tensor_quantized_tensor %716, %719, %720 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%722 = torch.aten.dequantize.self %721 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
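    // 3x3 convolution with stride 2 (padding 1): [1,128,56,56] -> [1,128,28,28]; this is where the spatial downsampling happens.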
%int1_157 = torch.constant.int 1
%int1_158 = torch.constant.int 1
%int1_159 = torch.constant.int 1
%int1_160 = torch.constant.int 1
%int2_161 = torch.constant.int 2
%int2_162 = torch.constant.int 2
%int0_163 = torch.constant.int 0
%723 = torch.prim.ListConstruct %int1_157, %int1_158 : (!torch.int, !torch.int) -> !torch.list<int>
%724 = torch.prim.ListConstruct %int1_159, %int1_160 : (!torch.int, !torch.int) -> !torch.list<int>
%725 = torch.prim.ListConstruct %int2_161, %int2_162 : (!torch.int, !torch.int) -> !torch.list<int>
%726 = torch.prim.ListConstruct %int0_163, %int0_163 : (!torch.int, !torch.int) -> !torch.list<int>
%false_164 = torch.constant.bool false
%int1_165 = torch.constant.int 1
%727 = torch.aten.convolution %698, %710, %722, %725, %723, %724, %false_164, %726, %int1_165 : !torch.vtensor<[1,128,56,56],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%728 = torch.aten.relu %727 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%729 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%730 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_166 = torch.constant.int 12
%731 = torch.aten.item %729 : !torch.vtensor<[],f32> -> !torch.float
%732 = torch.aten.item %730 : !torch.vtensor<[],si8> -> !torch.int
%733 = torch.aten.quantize_per_tensor %728, %731, %732, %int12_166 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%734 = torch.aten.int_repr %733 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%735 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%736 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%737 = torch.aten.item %735 : !torch.vtensor<[],f32> -> !torch.float
%738 = torch.aten.item %736 : !torch.vtensor<[],si8> -> !torch.int
%739 = torch.aten._make_per_tensor_quantized_tensor %734, %737, %738 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%740 = torch.aten.dequantize.self %739 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%741 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%742 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_167 = torch.constant.int 12
%743 = torch.aten.item %741 : !torch.vtensor<[],f32> -> !torch.float
%744 = torch.aten.item %742 : !torch.vtensor<[],si8> -> !torch.int
%745 = torch.aten.quantize_per_tensor %26, %743, %744, %int12_167 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%746 = torch.aten.int_repr %745 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%747 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%748 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%749 = torch.aten.item %747 : !torch.vtensor<[],f32> -> !torch.float
%750 = torch.aten.item %748 : !torch.vtensor<[],si8> -> !torch.int
%751 = torch.aten._make_per_tensor_quantized_tensor %746, %749, %750 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%752 = torch.aten.dequantize.self %751 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%753 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%754 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_168 = torch.constant.int 12
%755 = torch.aten.item %753 : !torch.vtensor<[],f32> -> !torch.float
%756 = torch.aten.item %754 : !torch.vtensor<[],si8> -> !torch.int
%757 = torch.aten.quantize_per_tensor %27, %755, %756, %int12_168 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%758 = torch.aten.int_repr %757 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%759 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%760 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%761 = torch.aten.item %759 : !torch.vtensor<[],f32> -> !torch.float
%762 = torch.aten.item %760 : !torch.vtensor<[],si8> -> !torch.int
%763 = torch.aten._make_per_tensor_quantized_tensor %758, %761, %762 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%764 = torch.aten.dequantize.self %763 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_169 = torch.constant.int 0
%int0_170 = torch.constant.int 0
%int1_171 = torch.constant.int 1
%int1_172 = torch.constant.int 1
%int1_173 = torch.constant.int 1
%int1_174 = torch.constant.int 1
%int0_175 = torch.constant.int 0
%765 = torch.prim.ListConstruct %int0_169, %int0_170 : (!torch.int, !torch.int) -> !torch.list<int>
%766 = torch.prim.ListConstruct %int1_171, %int1_172 : (!torch.int, !torch.int) -> !torch.list<int>
%767 = torch.prim.ListConstruct %int1_173, %int1_174 : (!torch.int, !torch.int) -> !torch.list<int>
%768 = torch.prim.ListConstruct %int0_175, %int0_175 : (!torch.int, !torch.int) -> !torch.list<int>
%false_176 = torch.constant.bool false
%int1_177 = torch.constant.int 1
%769 = torch.aten.convolution %740, %752, %764, %767, %765, %766, %false_176, %768, %int1_177 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%770 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_178 = torch.constant.int 12
%772 = torch.aten.item %770 : !torch.vtensor<[],f32> -> !torch.float
%773 = torch.aten.item %771 : !torch.vtensor<[],si8> -> !torch.int
%774 = torch.aten.quantize_per_tensor %769, %772, %773, %int12_178 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%775 = torch.aten.int_repr %774 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%776 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%777 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%778 = torch.aten.item %776 : !torch.vtensor<[],f32> -> !torch.float
%779 = torch.aten.item %777 : !torch.vtensor<[],si8> -> !torch.int
%780 = torch.aten._make_per_tensor_quantized_tensor %775, %778, %779 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%781 = torch.aten.dequantize.self %780 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
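    // Shortcut branch: QDQ of the 1x1 projection weight %28 ([512,256,1,1]), applied below with stride 2 to the block input %656 so the shapes match for the residual add.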
%782 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_179 = torch.constant.int 12
%784 = torch.aten.item %782 : !torch.vtensor<[],f32> -> !torch.float
%785 = torch.aten.item %783 : !torch.vtensor<[],si8> -> !torch.int
%786 = torch.aten.quantize_per_tensor %28, %784, %785, %int12_179 : !torch.vtensor<[512,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,1,1],!torch.qint8>
%787 = torch.aten.int_repr %786 : !torch.vtensor<[512,256,1,1],!torch.qint8> -> !torch.vtensor<[512,256,1,1],si8>
%788 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%790 = torch.aten.item %788 : !torch.vtensor<[],f32> -> !torch.float
%791 = torch.aten.item %789 : !torch.vtensor<[],si8> -> !torch.int
%792 = torch.aten._make_per_tensor_quantized_tensor %787, %790, %791 : !torch.vtensor<[512,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,1,1],!torch.qint8>
%793 = torch.aten.dequantize.self %792 : !torch.vtensor<[512,256,1,1],!torch.qint8> -> !torch.vtensor<[512,256,1,1],f32>
%794 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_180 = torch.constant.int 12
%796 = torch.aten.item %794 : !torch.vtensor<[],f32> -> !torch.float
%797 = torch.aten.item %795 : !torch.vtensor<[],si8> -> !torch.int
%798 = torch.aten.quantize_per_tensor %29, %796, %797, %int12_180 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%799 = torch.aten.int_repr %798 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%800 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%802 = torch.aten.item %800 : !torch.vtensor<[],f32> -> !torch.float
%803 = torch.aten.item %801 : !torch.vtensor<[],si8> -> !torch.int
%804 = torch.aten._make_per_tensor_quantized_tensor %799, %802, %803 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%805 = torch.aten.dequantize.self %804 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_181 = torch.constant.int 0
%int0_182 = torch.constant.int 0
%int1_183 = torch.constant.int 1
%int1_184 = torch.constant.int 1
%int2_185 = torch.constant.int 2
%int2_186 = torch.constant.int 2
%int0_187 = torch.constant.int 0
%806 = torch.prim.ListConstruct %int0_181, %int0_182 : (!torch.int, !torch.int) -> !torch.list<int>
%807 = torch.prim.ListConstruct %int1_183, %int1_184 : (!torch.int, !torch.int) -> !torch.list<int>
%808 = torch.prim.ListConstruct %int2_185, %int2_186 : (!torch.int, !torch.int) -> !torch.list<int>
%809 = torch.prim.ListConstruct %int0_187, %int0_187 : (!torch.int, !torch.int) -> !torch.list<int>
%false_188 = torch.constant.bool false
%int1_189 = torch.constant.int 1
%810 = torch.aten.convolution %656, %793, %805, %808, %806, %807, %false_188, %809, %int1_189 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[512,256,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%811 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%812 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_190 = torch.constant.int 12
%813 = torch.aten.item %811 : !torch.vtensor<[],f32> -> !torch.float
%814 = torch.aten.item %812 : !torch.vtensor<[],si8> -> !torch.int
%815 = torch.aten.quantize_per_tensor %810, %813, %814, %int12_190 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%816 = torch.aten.int_repr %815 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%817 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%818 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%819 = torch.aten.item %817 : !torch.vtensor<[],f32> -> !torch.float
%820 = torch.aten.item %818 : !torch.vtensor<[],si8> -> !torch.int
%821 = torch.aten._make_per_tensor_quantized_tensor %816, %819, %820 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%822 = torch.aten.dequantize.self %821 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
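    // Residual add: main path %781 + projected shortcut %822, then ReLU.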
%int1_191 = torch.constant.int 1
%823 = torch.aten.add.Tensor %781, %822, %int1_191 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%824 = torch.aten.relu %823 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%825 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_192 = torch.constant.int 12
%827 = torch.aten.item %825 : !torch.vtensor<[],f32> -> !torch.float
%828 = torch.aten.item %826 : !torch.vtensor<[],si8> -> !torch.int
%829 = torch.aten.quantize_per_tensor %824, %827, %828, %int12_192 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%830 = torch.aten.int_repr %829 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%831 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%833 = torch.aten.item %831 : !torch.vtensor<[],f32> -> !torch.float
%834 = torch.aten.item %832 : !torch.vtensor<[],si8> -> !torch.int
%835 = torch.aten._make_per_tensor_quantized_tensor %830, %833, %834 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%836 = torch.aten.dequantize.self %835 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
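    // Next bottleneck block: QDQ of the 1x1 reduce weight %30 ([128,512,1,1], scale 1/512 = 0.001953125).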
%837 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_193 = torch.constant.int 12
%839 = torch.aten.item %837 : !torch.vtensor<[],f32> -> !torch.float
%840 = torch.aten.item %838 : !torch.vtensor<[],si8> -> !torch.int
%841 = torch.aten.quantize_per_tensor %30, %839, %840, %int12_193 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%842 = torch.aten.int_repr %841 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%843 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%844 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%845 = torch.aten.item %843 : !torch.vtensor<[],f32> -> !torch.float
%846 = torch.aten.item %844 : !torch.vtensor<[],si8> -> !torch.int
%847 = torch.aten._make_per_tensor_quantized_tensor %842, %845, %846 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%848 = torch.aten.dequantize.self %847 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%849 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%850 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_194 = torch.constant.int 12
%851 = torch.aten.item %849 : !torch.vtensor<[],f32> -> !torch.float
%852 = torch.aten.item %850 : !torch.vtensor<[],si8> -> !torch.int
%853 = torch.aten.quantize_per_tensor %31, %851, %852, %int12_194 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%854 = torch.aten.int_repr %853 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%855 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%856 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%857 = torch.aten.item %855 : !torch.vtensor<[],f32> -> !torch.float
%858 = torch.aten.item %856 : !torch.vtensor<[],si8> -> !torch.int
%859 = torch.aten._make_per_tensor_quantized_tensor %854, %857, %858 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%860 = torch.aten.dequantize.self %859 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_195 = torch.constant.int 0
%int0_196 = torch.constant.int 0
%int1_197 = torch.constant.int 1
%int1_198 = torch.constant.int 1
%int1_199 = torch.constant.int 1
%int1_200 = torch.constant.int 1
%int0_201 = torch.constant.int 0
%861 = torch.prim.ListConstruct %int0_195, %int0_196 : (!torch.int, !torch.int) -> !torch.list<int>
%862 = torch.prim.ListConstruct %int1_197, %int1_198 : (!torch.int, !torch.int) -> !torch.list<int>
%863 = torch.prim.ListConstruct %int1_199, %int1_200 : (!torch.int, !torch.int) -> !torch.list<int>
%864 = torch.prim.ListConstruct %int0_201, %int0_201 : (!torch.int, !torch.int) -> !torch.list<int>
%false_202 = torch.constant.bool false
%int1_203 = torch.constant.int 1
%865 = torch.aten.convolution %836, %848, %860, %863, %861, %862, %false_202, %864, %int1_203 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%866 = torch.aten.relu %865 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%867 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%868 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_204 = torch.constant.int 12
%869 = torch.aten.item %867 : !torch.vtensor<[],f32> -> !torch.float
%870 = torch.aten.item %868 : !torch.vtensor<[],si8> -> !torch.int
%871 = torch.aten.quantize_per_tensor %866, %869, %870, %int12_204 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%872 = torch.aten.int_repr %871 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%873 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%874 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%875 = torch.aten.item %873 : !torch.vtensor<[],f32> -> !torch.float
%876 = torch.aten.item %874 : !torch.vtensor<[],si8> -> !torch.int
%877 = torch.aten._make_per_tensor_quantized_tensor %872, %875, %876 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%878 = torch.aten.dequantize.self %877 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%879 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%880 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_205 = torch.constant.int 12
%881 = torch.aten.item %879 : !torch.vtensor<[],f32> -> !torch.float
%882 = torch.aten.item %880 : !torch.vtensor<[],si8> -> !torch.int
%883 = torch.aten.quantize_per_tensor %32, %881, %882, %int12_205 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%884 = torch.aten.int_repr %883 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%885 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%886 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%887 = torch.aten.item %885 : !torch.vtensor<[],f32> -> !torch.float
%888 = torch.aten.item %886 : !torch.vtensor<[],si8> -> !torch.int
%889 = torch.aten._make_per_tensor_quantized_tensor %884, %887, %888 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%890 = torch.aten.dequantize.self %889 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%891 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%892 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_206 = torch.constant.int 12
%893 = torch.aten.item %891 : !torch.vtensor<[],f32> -> !torch.float
%894 = torch.aten.item %892 : !torch.vtensor<[],si8> -> !torch.int
%895 = torch.aten.quantize_per_tensor %33, %893, %894, %int12_206 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%896 = torch.aten.int_repr %895 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%897 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%898 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%899 = torch.aten.item %897 : !torch.vtensor<[],f32> -> !torch.float
%900 = torch.aten.item %898 : !torch.vtensor<[],si8> -> !torch.int
%901 = torch.aten._make_per_tensor_quantized_tensor %896, %899, %900 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%902 = torch.aten.dequantize.self %901 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_207 = torch.constant.int 1
%int1_208 = torch.constant.int 1
%int1_209 = torch.constant.int 1
%int1_210 = torch.constant.int 1
%int1_211 = torch.constant.int 1
%int1_212 = torch.constant.int 1
%int0_213 = torch.constant.int 0
%903 = torch.prim.ListConstruct %int1_207, %int1_208 : (!torch.int, !torch.int) -> !torch.list<int>
%904 = torch.prim.ListConstruct %int1_209, %int1_210 : (!torch.int, !torch.int) -> !torch.list<int>
%905 = torch.prim.ListConstruct %int1_211, %int1_212 : (!torch.int, !torch.int) -> !torch.list<int>
%906 = torch.prim.ListConstruct %int0_213, %int0_213 : (!torch.int, !torch.int) -> !torch.list<int>
%false_214 = torch.constant.bool false
%int1_215 = torch.constant.int 1
%907 = torch.aten.convolution %878, %890, %902, %905, %903, %904, %false_214, %906, %int1_215 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%908 = torch.aten.relu %907 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%909 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%910 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_216 = torch.constant.int 12
%911 = torch.aten.item %909 : !torch.vtensor<[],f32> -> !torch.float
%912 = torch.aten.item %910 : !torch.vtensor<[],si8> -> !torch.int
%913 = torch.aten.quantize_per_tensor %908, %911, %912, %int12_216 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%914 = torch.aten.int_repr %913 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%915 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%916 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%917 = torch.aten.item %915 : !torch.vtensor<[],f32> -> !torch.float
%918 = torch.aten.item %916 : !torch.vtensor<[],si8> -> !torch.int
%919 = torch.aten._make_per_tensor_quantized_tensor %914, %917, %918 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%920 = torch.aten.dequantize.self %919 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%921 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%922 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_217 = torch.constant.int 12
%923 = torch.aten.item %921 : !torch.vtensor<[],f32> -> !torch.float
%924 = torch.aten.item %922 : !torch.vtensor<[],si8> -> !torch.int
%925 = torch.aten.quantize_per_tensor %34, %923, %924, %int12_217 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%926 = torch.aten.int_repr %925 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%927 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%928 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%929 = torch.aten.item %927 : !torch.vtensor<[],f32> -> !torch.float
%930 = torch.aten.item %928 : !torch.vtensor<[],si8> -> !torch.int
%931 = torch.aten._make_per_tensor_quantized_tensor %926, %929, %930 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%932 = torch.aten.dequantize.self %931 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%933 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%934 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_218 = torch.constant.int 12
%935 = torch.aten.item %933 : !torch.vtensor<[],f32> -> !torch.float
%936 = torch.aten.item %934 : !torch.vtensor<[],si8> -> !torch.int
%937 = torch.aten.quantize_per_tensor %35, %935, %936, %int12_218 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%938 = torch.aten.int_repr %937 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%939 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%940 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%941 = torch.aten.item %939 : !torch.vtensor<[],f32> -> !torch.float
%942 = torch.aten.item %940 : !torch.vtensor<[],si8> -> !torch.int
%943 = torch.aten._make_per_tensor_quantized_tensor %938, %941, %942 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%944 = torch.aten.dequantize.self %943 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_219 = torch.constant.int 0
%int0_220 = torch.constant.int 0
%int1_221 = torch.constant.int 1
%int1_222 = torch.constant.int 1
%int1_223 = torch.constant.int 1
%int1_224 = torch.constant.int 1
%int0_225 = torch.constant.int 0
%945 = torch.prim.ListConstruct %int0_219, %int0_220 : (!torch.int, !torch.int) -> !torch.list<int>
%946 = torch.prim.ListConstruct %int1_221, %int1_222 : (!torch.int, !torch.int) -> !torch.list<int>
%947 = torch.prim.ListConstruct %int1_223, %int1_224 : (!torch.int, !torch.int) -> !torch.list<int>
%948 = torch.prim.ListConstruct %int0_225, %int0_225 : (!torch.int, !torch.int) -> !torch.list<int>
%false_226 = torch.constant.bool false
%int1_227 = torch.constant.int 1
%949 = torch.aten.convolution %920, %932, %944, %947, %945, %946, %false_226, %948, %int1_227 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%950 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_228 = torch.constant.int 12
%952 = torch.aten.item %950 : !torch.vtensor<[],f32> -> !torch.float
%953 = torch.aten.item %951 : !torch.vtensor<[],si8> -> !torch.int
%954 = torch.aten.quantize_per_tensor %949, %952, %953, %int12_228 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%955 = torch.aten.int_repr %954 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%956 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%958 = torch.aten.item %956 : !torch.vtensor<[],f32> -> !torch.float
%959 = torch.aten.item %957 : !torch.vtensor<[],si8> -> !torch.int
%960 = torch.aten._make_per_tensor_quantized_tensor %955, %958, %959 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%961 = torch.aten.dequantize.self %960 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
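    // Residual add with the block input %836, then ReLU.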
%int1_229 = torch.constant.int 1
%962 = torch.aten.add.Tensor %961, %836, %int1_229 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%963 = torch.aten.relu %962 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%964 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%965 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_230 = torch.constant.int 12
%966 = torch.aten.item %964 : !torch.vtensor<[],f32> -> !torch.float
%967 = torch.aten.item %965 : !torch.vtensor<[],si8> -> !torch.int
%968 = torch.aten.quantize_per_tensor %963, %966, %967, %int12_230 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%969 = torch.aten.int_repr %968 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%970 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%971 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%972 = torch.aten.item %970 : !torch.vtensor<[],f32> -> !torch.float
%973 = torch.aten.item %971 : !torch.vtensor<[],si8> -> !torch.int
%974 = torch.aten._make_per_tensor_quantized_tensor %969, %972, %973 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%975 = torch.aten.dequantize.self %974 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
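    // Another bottleneck with the same structure follows: QDQ of the 1x1 reduce weight %36 ([128,512,1,1]).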
%976 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%977 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_231 = torch.constant.int 12
%978 = torch.aten.item %976 : !torch.vtensor<[],f32> -> !torch.float
%979 = torch.aten.item %977 : !torch.vtensor<[],si8> -> !torch.int
%980 = torch.aten.quantize_per_tensor %36, %978, %979, %int12_231 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%981 = torch.aten.int_repr %980 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%982 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%983 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%984 = torch.aten.item %982 : !torch.vtensor<[],f32> -> !torch.float
%985 = torch.aten.item %983 : !torch.vtensor<[],si8> -> !torch.int
%986 = torch.aten._make_per_tensor_quantized_tensor %981, %984, %985 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%987 = torch.aten.dequantize.self %986 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%988 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%989 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_232 = torch.constant.int 12
%990 = torch.aten.item %988 : !torch.vtensor<[],f32> -> !torch.float
%991 = torch.aten.item %989 : !torch.vtensor<[],si8> -> !torch.int
%992 = torch.aten.quantize_per_tensor %37, %990, %991, %int12_232 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%993 = torch.aten.int_repr %992 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%994 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%995 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%996 = torch.aten.item %994 : !torch.vtensor<[],f32> -> !torch.float
%997 = torch.aten.item %995 : !torch.vtensor<[],si8> -> !torch.int
%998 = torch.aten._make_per_tensor_quantized_tensor %993, %996, %997 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%999 = torch.aten.dequantize.self %998 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_233 = torch.constant.int 0
%int0_234 = torch.constant.int 0
%int1_235 = torch.constant.int 1
%int1_236 = torch.constant.int 1
%int1_237 = torch.constant.int 1
%int1_238 = torch.constant.int 1
%int0_239 = torch.constant.int 0
%1000 = torch.prim.ListConstruct %int0_233, %int0_234 : (!torch.int, !torch.int) -> !torch.list<int>
%1001 = torch.prim.ListConstruct %int1_235, %int1_236 : (!torch.int, !torch.int) -> !torch.list<int>
%1002 = torch.prim.ListConstruct %int1_237, %int1_238 : (!torch.int, !torch.int) -> !torch.list<int>
%1003 = torch.prim.ListConstruct %int0_239, %int0_239 : (!torch.int, !torch.int) -> !torch.list<int>
%false_240 = torch.constant.bool false
%int1_241 = torch.constant.int 1
%1004 = torch.aten.convolution %975, %987, %999, %1002, %1000, %1001, %false_240, %1003, %int1_241 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1005 = torch.aten.relu %1004 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1006 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1007 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_242 = torch.constant.int 12
%1008 = torch.aten.item %1006 : !torch.vtensor<[],f32> -> !torch.float
%1009 = torch.aten.item %1007 : !torch.vtensor<[],si8> -> !torch.int
%1010 = torch.aten.quantize_per_tensor %1005, %1008, %1009, %int12_242 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1011 = torch.aten.int_repr %1010 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1012 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1014 = torch.aten.item %1012 : !torch.vtensor<[],f32> -> !torch.float
%1015 = torch.aten.item %1013 : !torch.vtensor<[],si8> -> !torch.int
%1016 = torch.aten._make_per_tensor_quantized_tensor %1011, %1014, %1015 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1017 = torch.aten.dequantize.self %1016 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
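    // QDQ of the 3x3 weight %38 ([128,128,3,3], scale 2^-8) and bias %39 (scale 2^-7).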
%1018 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_243 = torch.constant.int 12
%1020 = torch.aten.item %1018 : !torch.vtensor<[],f32> -> !torch.float
%1021 = torch.aten.item %1019 : !torch.vtensor<[],si8> -> !torch.int
%1022 = torch.aten.quantize_per_tensor %38, %1020, %1021, %int12_243 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1023 = torch.aten.int_repr %1022 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%1024 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1026 = torch.aten.item %1024 : !torch.vtensor<[],f32> -> !torch.float
%1027 = torch.aten.item %1025 : !torch.vtensor<[],si8> -> !torch.int
%1028 = torch.aten._make_per_tensor_quantized_tensor %1023, %1026, %1027 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1029 = torch.aten.dequantize.self %1028 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%1030 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1031 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_244 = torch.constant.int 12
%1032 = torch.aten.item %1030 : !torch.vtensor<[],f32> -> !torch.float
%1033 = torch.aten.item %1031 : !torch.vtensor<[],si8> -> !torch.int
%1034 = torch.aten.quantize_per_tensor %39, %1032, %1033, %int12_244 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1035 = torch.aten.int_repr %1034 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1036 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1037 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1038 = torch.aten.item %1036 : !torch.vtensor<[],f32> -> !torch.float
%1039 = torch.aten.item %1037 : !torch.vtensor<[],si8> -> !torch.int
%1040 = torch.aten._make_per_tensor_quantized_tensor %1035, %1038, %1039 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1041 = torch.aten.dequantize.self %1040 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
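    // 3x3 convolution: stride [1,1], padding [1,1], dilation [1,1]; 128 channels and
    // the 28x28 spatial size are preserved. ReLU follows.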
%int1_245 = torch.constant.int 1
%int1_246 = torch.constant.int 1
%int1_247 = torch.constant.int 1
%int1_248 = torch.constant.int 1
%int1_249 = torch.constant.int 1
%int1_250 = torch.constant.int 1
%int0_251 = torch.constant.int 0
%1042 = torch.prim.ListConstruct %int1_245, %int1_246 : (!torch.int, !torch.int) -> !torch.list<int>
%1043 = torch.prim.ListConstruct %int1_247, %int1_248 : (!torch.int, !torch.int) -> !torch.list<int>
%1044 = torch.prim.ListConstruct %int1_249, %int1_250 : (!torch.int, !torch.int) -> !torch.list<int>
%1045 = torch.prim.ListConstruct %int0_251, %int0_251 : (!torch.int, !torch.int) -> !torch.list<int>
%false_252 = torch.constant.bool false
%int1_253 = torch.constant.int 1
%1046 = torch.aten.convolution %1017, %1029, %1041, %1044, %1042, %1043, %false_252, %1045, %int1_253 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1047 = torch.aten.relu %1046 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1048 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_254 = torch.constant.int 12
%1050 = torch.aten.item %1048 : !torch.vtensor<[],f32> -> !torch.float
%1051 = torch.aten.item %1049 : !torch.vtensor<[],si8> -> !torch.int
%1052 = torch.aten.quantize_per_tensor %1047, %1050, %1051, %int12_254 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1053 = torch.aten.int_repr %1052 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1054 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1056 = torch.aten.item %1054 : !torch.vtensor<[],f32> -> !torch.float
%1057 = torch.aten.item %1055 : !torch.vtensor<[],si8> -> !torch.int
%1058 = torch.aten._make_per_tensor_quantized_tensor %1053, %1056, %1057 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1059 = torch.aten.dequantize.self %1058 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
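    // QDQ of the 1x1 expansion weight %40 ([512,128,1,1]) and bias %41, both at scale 2^-8.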
%1060 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1061 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_255 = torch.constant.int 12
%1062 = torch.aten.item %1060 : !torch.vtensor<[],f32> -> !torch.float
%1063 = torch.aten.item %1061 : !torch.vtensor<[],si8> -> !torch.int
%1064 = torch.aten.quantize_per_tensor %40, %1062, %1063, %int12_255 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1065 = torch.aten.int_repr %1064 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%1066 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1067 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1068 = torch.aten.item %1066 : !torch.vtensor<[],f32> -> !torch.float
%1069 = torch.aten.item %1067 : !torch.vtensor<[],si8> -> !torch.int
%1070 = torch.aten._make_per_tensor_quantized_tensor %1065, %1068, %1069 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1071 = torch.aten.dequantize.self %1070 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%1072 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1073 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_256 = torch.constant.int 12
%1074 = torch.aten.item %1072 : !torch.vtensor<[],f32> -> !torch.float
%1075 = torch.aten.item %1073 : !torch.vtensor<[],si8> -> !torch.int
%1076 = torch.aten.quantize_per_tensor %41, %1074, %1075, %int12_256 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1077 = torch.aten.int_repr %1076 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%1078 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1079 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1080 = torch.aten.item %1078 : !torch.vtensor<[],f32> -> !torch.float
%1081 = torch.aten.item %1079 : !torch.vtensor<[],si8> -> !torch.int
%1082 = torch.aten._make_per_tensor_quantized_tensor %1077, %1080, %1081 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1083 = torch.aten.dequantize.self %1082 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
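    // 1x1 expansion convolution: 128 -> 512 channels, stride [1,1], padding [0,0].
    // No ReLU here; the nonlinearity comes after the residual add below.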
%int0_257 = torch.constant.int 0
%int0_258 = torch.constant.int 0
%int1_259 = torch.constant.int 1
%int1_260 = torch.constant.int 1
%int1_261 = torch.constant.int 1
%int1_262 = torch.constant.int 1
%int0_263 = torch.constant.int 0
%1084 = torch.prim.ListConstruct %int0_257, %int0_258 : (!torch.int, !torch.int) -> !torch.list<int>
%1085 = torch.prim.ListConstruct %int1_259, %int1_260 : (!torch.int, !torch.int) -> !torch.list<int>
%1086 = torch.prim.ListConstruct %int1_261, %int1_262 : (!torch.int, !torch.int) -> !torch.list<int>
%1087 = torch.prim.ListConstruct %int0_263, %int0_263 : (!torch.int, !torch.int) -> !torch.list<int>
%false_264 = torch.constant.bool false
%int1_265 = torch.constant.int 1
%1088 = torch.aten.convolution %1059, %1071, %1083, %1086, %1084, %1085, %false_264, %1087, %int1_265 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1089 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1090 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_266 = torch.constant.int 12
%1091 = torch.aten.item %1089 : !torch.vtensor<[],f32> -> !torch.float
%1092 = torch.aten.item %1090 : !torch.vtensor<[],si8> -> !torch.int
%1093 = torch.aten.quantize_per_tensor %1088, %1091, %1092, %int12_266 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1094 = torch.aten.int_repr %1093 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1095 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1096 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1097 = torch.aten.item %1095 : !torch.vtensor<[],f32> -> !torch.float
%1098 = torch.aten.item %1096 : !torch.vtensor<[],si8> -> !torch.int
%1099 = torch.aten._make_per_tensor_quantized_tensor %1094, %1097, %1098 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1100 = torch.aten.dequantize.self %1099 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
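    // Residual connection: the requantized branch output %1100 is added to the block
    // input %975 and passed through ReLU, completing a 1x1/3x3/1x1 bottleneck of a
    // ResNet-style backbone.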
%int1_267 = torch.constant.int 1
%1101 = torch.aten.add.Tensor %1100, %975, %int1_267 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1102 = torch.aten.relu %1101 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%1103 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_268 = torch.constant.int 12
%1105 = torch.aten.item %1103 : !torch.vtensor<[],f32> -> !torch.float
%1106 = torch.aten.item %1104 : !torch.vtensor<[],si8> -> !torch.int
%1107 = torch.aten.quantize_per_tensor %1102, %1105, %1106, %int12_268 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1108 = torch.aten.int_repr %1107 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1109 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1111 = torch.aten.item %1109 : !torch.vtensor<[],f32> -> !torch.float
%1112 = torch.aten.item %1110 : !torch.vtensor<[],si8> -> !torch.int
%1113 = torch.aten._make_per_tensor_quantized_tensor %1108, %1111, %1112 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1114 = torch.aten.dequantize.self %1113 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
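    // %1114 is this block's dequantized output and the next block's input. The second
    // bottleneck (weights %42-%47) below repeats the same QDQ + convolution structure,
    // differing only in quantization scales.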
%1115 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_269 = torch.constant.int 12
%1117 = torch.aten.item %1115 : !torch.vtensor<[],f32> -> !torch.float
%1118 = torch.aten.item %1116 : !torch.vtensor<[],si8> -> !torch.int
%1119 = torch.aten.quantize_per_tensor %42, %1117, %1118, %int12_269 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%1120 = torch.aten.int_repr %1119 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%1121 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1123 = torch.aten.item %1121 : !torch.vtensor<[],f32> -> !torch.float
%1124 = torch.aten.item %1122 : !torch.vtensor<[],si8> -> !torch.int
%1125 = torch.aten._make_per_tensor_quantized_tensor %1120, %1123, %1124 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%1126 = torch.aten.dequantize.self %1125 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%1127 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_270 = torch.constant.int 12
%1129 = torch.aten.item %1127 : !torch.vtensor<[],f32> -> !torch.float
%1130 = torch.aten.item %1128 : !torch.vtensor<[],si8> -> !torch.int
%1131 = torch.aten.quantize_per_tensor %43, %1129, %1130, %int12_270 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1132 = torch.aten.int_repr %1131 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1133 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1134 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1135 = torch.aten.item %1133 : !torch.vtensor<[],f32> -> !torch.float
%1136 = torch.aten.item %1134 : !torch.vtensor<[],si8> -> !torch.int
%1137 = torch.aten._make_per_tensor_quantized_tensor %1132, %1135, %1136 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1138 = torch.aten.dequantize.self %1137 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_271 = torch.constant.int 0
%int0_272 = torch.constant.int 0
%int1_273 = torch.constant.int 1
%int1_274 = torch.constant.int 1
%int1_275 = torch.constant.int 1
%int1_276 = torch.constant.int 1
%int0_277 = torch.constant.int 0
%1139 = torch.prim.ListConstruct %int0_271, %int0_272 : (!torch.int, !torch.int) -> !torch.list<int>
%1140 = torch.prim.ListConstruct %int1_273, %int1_274 : (!torch.int, !torch.int) -> !torch.list<int>
%1141 = torch.prim.ListConstruct %int1_275, %int1_276 : (!torch.int, !torch.int) -> !torch.list<int>
%1142 = torch.prim.ListConstruct %int0_277, %int0_277 : (!torch.int, !torch.int) -> !torch.list<int>
%false_278 = torch.constant.bool false
%int1_279 = torch.constant.int 1
%1143 = torch.aten.convolution %1114, %1126, %1138, %1141, %1139, %1140, %false_278, %1142, %int1_279 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1144 = torch.aten.relu %1143 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1145 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_280 = torch.constant.int 12
%1147 = torch.aten.item %1145 : !torch.vtensor<[],f32> -> !torch.float
%1148 = torch.aten.item %1146 : !torch.vtensor<[],si8> -> !torch.int
%1149 = torch.aten.quantize_per_tensor %1144, %1147, %1148, %int12_280 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1150 = torch.aten.int_repr %1149 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1151 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1153 = torch.aten.item %1151 : !torch.vtensor<[],f32> -> !torch.float
%1154 = torch.aten.item %1152 : !torch.vtensor<[],si8> -> !torch.int
%1155 = torch.aten._make_per_tensor_quantized_tensor %1150, %1153, %1154 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1156 = torch.aten.dequantize.self %1155 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%1157 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_281 = torch.constant.int 12
%1159 = torch.aten.item %1157 : !torch.vtensor<[],f32> -> !torch.float
%1160 = torch.aten.item %1158 : !torch.vtensor<[],si8> -> !torch.int
%1161 = torch.aten.quantize_per_tensor %44, %1159, %1160, %int12_281 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1162 = torch.aten.int_repr %1161 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%1163 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1165 = torch.aten.item %1163 : !torch.vtensor<[],f32> -> !torch.float
%1166 = torch.aten.item %1164 : !torch.vtensor<[],si8> -> !torch.int
%1167 = torch.aten._make_per_tensor_quantized_tensor %1162, %1165, %1166 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1168 = torch.aten.dequantize.self %1167 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%1169 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_282 = torch.constant.int 12
%1171 = torch.aten.item %1169 : !torch.vtensor<[],f32> -> !torch.float
%1172 = torch.aten.item %1170 : !torch.vtensor<[],si8> -> !torch.int
%1173 = torch.aten.quantize_per_tensor %45, %1171, %1172, %int12_282 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1174 = torch.aten.int_repr %1173 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1175 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1177 = torch.aten.item %1175 : !torch.vtensor<[],f32> -> !torch.float
%1178 = torch.aten.item %1176 : !torch.vtensor<[],si8> -> !torch.int
%1179 = torch.aten._make_per_tensor_quantized_tensor %1174, %1177, %1178 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1180 = torch.aten.dequantize.self %1179 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_283 = torch.constant.int 1
%int1_284 = torch.constant.int 1
%int1_285 = torch.constant.int 1
%int1_286 = torch.constant.int 1
%int1_287 = torch.constant.int 1
%int1_288 = torch.constant.int 1
%int0_289 = torch.constant.int 0
%1181 = torch.prim.ListConstruct %int1_283, %int1_284 : (!torch.int, !torch.int) -> !torch.list<int>
%1182 = torch.prim.ListConstruct %int1_285, %int1_286 : (!torch.int, !torch.int) -> !torch.list<int>
%1183 = torch.prim.ListConstruct %int1_287, %int1_288 : (!torch.int, !torch.int) -> !torch.list<int>
%1184 = torch.prim.ListConstruct %int0_289, %int0_289 : (!torch.int, !torch.int) -> !torch.list<int>
%false_290 = torch.constant.bool false
%int1_291 = torch.constant.int 1
%1185 = torch.aten.convolution %1156, %1168, %1180, %1183, %1181, %1182, %false_290, %1184, %int1_291 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1186 = torch.aten.relu %1185 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1187 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1188 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_292 = torch.constant.int 12
%1189 = torch.aten.item %1187 : !torch.vtensor<[],f32> -> !torch.float
%1190 = torch.aten.item %1188 : !torch.vtensor<[],si8> -> !torch.int
%1191 = torch.aten.quantize_per_tensor %1186, %1189, %1190, %int12_292 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1192 = torch.aten.int_repr %1191 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1193 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1195 = torch.aten.item %1193 : !torch.vtensor<[],f32> -> !torch.float
%1196 = torch.aten.item %1194 : !torch.vtensor<[],si8> -> !torch.int
%1197 = torch.aten._make_per_tensor_quantized_tensor %1192, %1195, %1196 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1198 = torch.aten.dequantize.self %1197 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%1199 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_293 = torch.constant.int 12
%1201 = torch.aten.item %1199 : !torch.vtensor<[],f32> -> !torch.float
%1202 = torch.aten.item %1200 : !torch.vtensor<[],si8> -> !torch.int
%1203 = torch.aten.quantize_per_tensor %46, %1201, %1202, %int12_293 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1204 = torch.aten.int_repr %1203 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%1205 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1207 = torch.aten.item %1205 : !torch.vtensor<[],f32> -> !torch.float
%1208 = torch.aten.item %1206 : !torch.vtensor<[],si8> -> !torch.int
%1209 = torch.aten._make_per_tensor_quantized_tensor %1204, %1207, %1208 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1210 = torch.aten.dequantize.self %1209 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%1211 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_294 = torch.constant.int 12
%1213 = torch.aten.item %1211 : !torch.vtensor<[],f32> -> !torch.float
%1214 = torch.aten.item %1212 : !torch.vtensor<[],si8> -> !torch.int
%1215 = torch.aten.quantize_per_tensor %47, %1213, %1214, %int12_294 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1216 = torch.aten.int_repr %1215 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%1217 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1218 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1219 = torch.aten.item %1217 : !torch.vtensor<[],f32> -> !torch.float
%1220 = torch.aten.item %1218 : !torch.vtensor<[],si8> -> !torch.int
%1221 = torch.aten._make_per_tensor_quantized_tensor %1216, %1219, %1220 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1222 = torch.aten.dequantize.self %1221 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_295 = torch.constant.int 0
%int0_296 = torch.constant.int 0
%int1_297 = torch.constant.int 1
%int1_298 = torch.constant.int 1
%int1_299 = torch.constant.int 1
%int1_300 = torch.constant.int 1
%int0_301 = torch.constant.int 0
%1223 = torch.prim.ListConstruct %int0_295, %int0_296 : (!torch.int, !torch.int) -> !torch.list<int>
%1224 = torch.prim.ListConstruct %int1_297, %int1_298 : (!torch.int, !torch.int) -> !torch.list<int>
%1225 = torch.prim.ListConstruct %int1_299, %int1_300 : (!torch.int, !torch.int) -> !torch.list<int>
%1226 = torch.prim.ListConstruct %int0_301, %int0_301 : (!torch.int, !torch.int) -> !torch.list<int>
%false_302 = torch.constant.bool false
%int1_303 = torch.constant.int 1
%1227 = torch.aten.convolution %1198, %1210, %1222, %1225, %1223, %1224, %false_302, %1226, %int1_303 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1228 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_304 = torch.constant.int 12
%1230 = torch.aten.item %1228 : !torch.vtensor<[],f32> -> !torch.float
%1231 = torch.aten.item %1229 : !torch.vtensor<[],si8> -> !torch.int
%1232 = torch.aten.quantize_per_tensor %1227, %1230, %1231, %int12_304 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1233 = torch.aten.int_repr %1232 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1234 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1235 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1236 = torch.aten.item %1234 : !torch.vtensor<[],f32> -> !torch.float
%1237 = torch.aten.item %1235 : !torch.vtensor<[],si8> -> !torch.int
%1238 = torch.aten._make_per_tensor_quantized_tensor %1233, %1236, %1237 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1239 = torch.aten.dequantize.self %1238 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%int1_305 = torch.constant.int 1
%1240 = torch.aten.add.Tensor %1239, %1114, %int1_305 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1241 = torch.aten.relu %1240 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%1242 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1243 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_306 = torch.constant.int 12
%1244 = torch.aten.item %1242 : !torch.vtensor<[],f32> -> !torch.float
%1245 = torch.aten.item %1243 : !torch.vtensor<[],si8> -> !torch.int
%1246 = torch.aten.quantize_per_tensor %1241, %1244, %1245, %int12_306 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1247 = torch.aten.int_repr %1246 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1248 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1249 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1250 = torch.aten.item %1248 : !torch.vtensor<[],f32> -> !torch.float
%1251 = torch.aten.item %1249 : !torch.vtensor<[],si8> -> !torch.int
%1252 = torch.aten._make_per_tensor_quantized_tensor %1247, %1250, %1251 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1253 = torch.aten.dequantize.self %1252 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
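    // Stage transition: %1253 ([1,512,28,28]) feeds two branches. The main branch
    // (weights %48-%53) is 1x1 512->256, 3x3 256->256, 1x1 256->1024; a projection
    // shortcut (weights %54/%55, 1024x512x1x1) raises the input to 1024 channels so
    // the branches can be added. Spatial size stays at 28x28 throughout.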
%1254 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_307 = torch.constant.int 12
%1256 = torch.aten.item %1254 : !torch.vtensor<[],f32> -> !torch.float
%1257 = torch.aten.item %1255 : !torch.vtensor<[],si8> -> !torch.int
%1258 = torch.aten.quantize_per_tensor %48, %1256, %1257, %int12_307 : !torch.vtensor<[256,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,1,1],!torch.qint8>
%1259 = torch.aten.int_repr %1258 : !torch.vtensor<[256,512,1,1],!torch.qint8> -> !torch.vtensor<[256,512,1,1],si8>
%1260 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1262 = torch.aten.item %1260 : !torch.vtensor<[],f32> -> !torch.float
%1263 = torch.aten.item %1261 : !torch.vtensor<[],si8> -> !torch.int
%1264 = torch.aten._make_per_tensor_quantized_tensor %1259, %1262, %1263 : !torch.vtensor<[256,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,1,1],!torch.qint8>
%1265 = torch.aten.dequantize.self %1264 : !torch.vtensor<[256,512,1,1],!torch.qint8> -> !torch.vtensor<[256,512,1,1],f32>
%1266 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_308 = torch.constant.int 12
%1268 = torch.aten.item %1266 : !torch.vtensor<[],f32> -> !torch.float
%1269 = torch.aten.item %1267 : !torch.vtensor<[],si8> -> !torch.int
%1270 = torch.aten.quantize_per_tensor %49, %1268, %1269, %int12_308 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1271 = torch.aten.int_repr %1270 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1272 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1273 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1274 = torch.aten.item %1272 : !torch.vtensor<[],f32> -> !torch.float
%1275 = torch.aten.item %1273 : !torch.vtensor<[],si8> -> !torch.int
%1276 = torch.aten._make_per_tensor_quantized_tensor %1271, %1274, %1275 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1277 = torch.aten.dequantize.self %1276 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_309 = torch.constant.int 0
%int0_310 = torch.constant.int 0
%int1_311 = torch.constant.int 1
%int1_312 = torch.constant.int 1
%int1_313 = torch.constant.int 1
%int1_314 = torch.constant.int 1
%int0_315 = torch.constant.int 0
%1278 = torch.prim.ListConstruct %int0_309, %int0_310 : (!torch.int, !torch.int) -> !torch.list<int>
%1279 = torch.prim.ListConstruct %int1_311, %int1_312 : (!torch.int, !torch.int) -> !torch.list<int>
%1280 = torch.prim.ListConstruct %int1_313, %int1_314 : (!torch.int, !torch.int) -> !torch.list<int>
%1281 = torch.prim.ListConstruct %int0_315, %int0_315 : (!torch.int, !torch.int) -> !torch.list<int>
%false_316 = torch.constant.bool false
%int1_317 = torch.constant.int 1
%1282 = torch.aten.convolution %1253, %1265, %1277, %1280, %1278, %1279, %false_316, %1281, %int1_317 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[256,512,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1283 = torch.aten.relu %1282 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1284 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1285 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_318 = torch.constant.int 12
%1286 = torch.aten.item %1284 : !torch.vtensor<[],f32> -> !torch.float
%1287 = torch.aten.item %1285 : !torch.vtensor<[],si8> -> !torch.int
%1288 = torch.aten.quantize_per_tensor %1283, %1286, %1287, %int12_318 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1289 = torch.aten.int_repr %1288 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1290 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1291 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1292 = torch.aten.item %1290 : !torch.vtensor<[],f32> -> !torch.float
%1293 = torch.aten.item %1291 : !torch.vtensor<[],si8> -> !torch.int
%1294 = torch.aten._make_per_tensor_quantized_tensor %1289, %1292, %1293 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1295 = torch.aten.dequantize.self %1294 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1296 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1297 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_319 = torch.constant.int 12
%1298 = torch.aten.item %1296 : !torch.vtensor<[],f32> -> !torch.float
%1299 = torch.aten.item %1297 : !torch.vtensor<[],si8> -> !torch.int
%1300 = torch.aten.quantize_per_tensor %50, %1298, %1299, %int12_319 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1301 = torch.aten.int_repr %1300 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1302 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1303 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1304 = torch.aten.item %1302 : !torch.vtensor<[],f32> -> !torch.float
%1305 = torch.aten.item %1303 : !torch.vtensor<[],si8> -> !torch.int
%1306 = torch.aten._make_per_tensor_quantized_tensor %1301, %1304, %1305 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1307 = torch.aten.dequantize.self %1306 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1308 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1309 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_320 = torch.constant.int 12
%1310 = torch.aten.item %1308 : !torch.vtensor<[],f32> -> !torch.float
%1311 = torch.aten.item %1309 : !torch.vtensor<[],si8> -> !torch.int
%1312 = torch.aten.quantize_per_tensor %51, %1310, %1311, %int12_320 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1313 = torch.aten.int_repr %1312 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1314 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1315 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1316 = torch.aten.item %1314 : !torch.vtensor<[],f32> -> !torch.float
%1317 = torch.aten.item %1315 : !torch.vtensor<[],si8> -> !torch.int
%1318 = torch.aten._make_per_tensor_quantized_tensor %1313, %1316, %1317 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1319 = torch.aten.dequantize.self %1318 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_321 = torch.constant.int 1
%int1_322 = torch.constant.int 1
%int1_323 = torch.constant.int 1
%int1_324 = torch.constant.int 1
%int1_325 = torch.constant.int 1
%int1_326 = torch.constant.int 1
%int0_327 = torch.constant.int 0
%1320 = torch.prim.ListConstruct %int1_321, %int1_322 : (!torch.int, !torch.int) -> !torch.list<int>
%1321 = torch.prim.ListConstruct %int1_323, %int1_324 : (!torch.int, !torch.int) -> !torch.list<int>
%1322 = torch.prim.ListConstruct %int1_325, %int1_326 : (!torch.int, !torch.int) -> !torch.list<int>
%1323 = torch.prim.ListConstruct %int0_327, %int0_327 : (!torch.int, !torch.int) -> !torch.list<int>
%false_328 = torch.constant.bool false
%int1_329 = torch.constant.int 1
%1324 = torch.aten.convolution %1295, %1307, %1319, %1322, %1320, %1321, %false_328, %1323, %int1_329 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1325 = torch.aten.relu %1324 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1326 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1327 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_330 = torch.constant.int 12
%1328 = torch.aten.item %1326 : !torch.vtensor<[],f32> -> !torch.float
%1329 = torch.aten.item %1327 : !torch.vtensor<[],si8> -> !torch.int
%1330 = torch.aten.quantize_per_tensor %1325, %1328, %1329, %int12_330 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1331 = torch.aten.int_repr %1330 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1332 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1333 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1334 = torch.aten.item %1332 : !torch.vtensor<[],f32> -> !torch.float
%1335 = torch.aten.item %1333 : !torch.vtensor<[],si8> -> !torch.int
%1336 = torch.aten._make_per_tensor_quantized_tensor %1331, %1334, %1335 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1337 = torch.aten.dequantize.self %1336 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1338 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1339 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_331 = torch.constant.int 12
%1340 = torch.aten.item %1338 : !torch.vtensor<[],f32> -> !torch.float
%1341 = torch.aten.item %1339 : !torch.vtensor<[],si8> -> !torch.int
%1342 = torch.aten.quantize_per_tensor %52, %1340, %1341, %int12_331 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1343 = torch.aten.int_repr %1342 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1344 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1345 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1346 = torch.aten.item %1344 : !torch.vtensor<[],f32> -> !torch.float
%1347 = torch.aten.item %1345 : !torch.vtensor<[],si8> -> !torch.int
%1348 = torch.aten._make_per_tensor_quantized_tensor %1343, %1346, %1347 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1349 = torch.aten.dequantize.self %1348 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1350 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1351 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_332 = torch.constant.int 12
%1352 = torch.aten.item %1350 : !torch.vtensor<[],f32> -> !torch.float
%1353 = torch.aten.item %1351 : !torch.vtensor<[],si8> -> !torch.int
%1354 = torch.aten.quantize_per_tensor %53, %1352, %1353, %int12_332 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1355 = torch.aten.int_repr %1354 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1356 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1357 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1358 = torch.aten.item %1356 : !torch.vtensor<[],f32> -> !torch.float
%1359 = torch.aten.item %1357 : !torch.vtensor<[],si8> -> !torch.int
%1360 = torch.aten._make_per_tensor_quantized_tensor %1355, %1358, %1359 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1361 = torch.aten.dequantize.self %1360 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
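    // 1x1 expansion convolution of the main branch: 256 -> 1024 channels,
    // stride [1,1], padding [0,0], producing [1,1024,28,28].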
%int0_333 = torch.constant.int 0
%int0_334 = torch.constant.int 0
%int1_335 = torch.constant.int 1
%int1_336 = torch.constant.int 1
%int1_337 = torch.constant.int 1
%int1_338 = torch.constant.int 1
%int0_339 = torch.constant.int 0
%1362 = torch.prim.ListConstruct %int0_333, %int0_334 : (!torch.int, !torch.int) -> !torch.list<int>
%1363 = torch.prim.ListConstruct %int1_335, %int1_336 : (!torch.int, !torch.int) -> !torch.list<int>
%1364 = torch.prim.ListConstruct %int1_337, %int1_338 : (!torch.int, !torch.int) -> !torch.list<int>
%1365 = torch.prim.ListConstruct %int0_339, %int0_339 : (!torch.int, !torch.int) -> !torch.list<int>
%false_340 = torch.constant.bool false
%int1_341 = torch.constant.int 1
%1366 = torch.aten.convolution %1337, %1349, %1361, %1364, %1362, %1363, %false_340, %1365, %int1_341 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1367 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1368 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_342 = torch.constant.int 12
%1369 = torch.aten.item %1367 : !torch.vtensor<[],f32> -> !torch.float
%1370 = torch.aten.item %1368 : !torch.vtensor<[],si8> -> !torch.int
%1371 = torch.aten.quantize_per_tensor %1366, %1369, %1370, %int12_342 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1372 = torch.aten.int_repr %1371 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1373 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1374 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1375 = torch.aten.item %1373 : !torch.vtensor<[],f32> -> !torch.float
%1376 = torch.aten.item %1374 : !torch.vtensor<[],si8> -> !torch.int
%1377 = torch.aten._make_per_tensor_quantized_tensor %1372, %1375, %1376 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1378 = torch.aten.dequantize.self %1377 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
%1379 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1380 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_343 = torch.constant.int 12
%1381 = torch.aten.item %1379 : !torch.vtensor<[],f32> -> !torch.float
%1382 = torch.aten.item %1380 : !torch.vtensor<[],si8> -> !torch.int
%1383 = torch.aten.quantize_per_tensor %54, %1381, %1382, %int12_343 : !torch.vtensor<[1024,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,512,1,1],!torch.qint8>
%1384 = torch.aten.int_repr %1383 : !torch.vtensor<[1024,512,1,1],!torch.qint8> -> !torch.vtensor<[1024,512,1,1],si8>
%1385 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1386 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1387 = torch.aten.item %1385 : !torch.vtensor<[],f32> -> !torch.float
%1388 = torch.aten.item %1386 : !torch.vtensor<[],si8> -> !torch.int
%1389 = torch.aten._make_per_tensor_quantized_tensor %1384, %1387, %1388 : !torch.vtensor<[1024,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,512,1,1],!torch.qint8>
%1390 = torch.aten.dequantize.self %1389 : !torch.vtensor<[1024,512,1,1],!torch.qint8> -> !torch.vtensor<[1024,512,1,1],f32>
%1391 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1392 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_344 = torch.constant.int 12
%1393 = torch.aten.item %1391 : !torch.vtensor<[],f32> -> !torch.float
%1394 = torch.aten.item %1392 : !torch.vtensor<[],si8> -> !torch.int
%1395 = torch.aten.quantize_per_tensor %55, %1393, %1394, %int12_344 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1396 = torch.aten.int_repr %1395 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1397 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1398 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1399 = torch.aten.item %1397 : !torch.vtensor<[],f32> -> !torch.float
%1400 = torch.aten.item %1398 : !torch.vtensor<[],si8> -> !torch.int
%1401 = torch.aten._make_per_tensor_quantized_tensor %1396, %1399, %1400 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1402 = torch.aten.dequantize.self %1401 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
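    // Projection shortcut: a 1x1 convolution applied directly to the stage input %1253
    // (512 -> 1024 channels) so the shortcut's shape matches the main branch.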
%int0_345 = torch.constant.int 0
%int0_346 = torch.constant.int 0
%int1_347 = torch.constant.int 1
%int1_348 = torch.constant.int 1
%int1_349 = torch.constant.int 1
%int1_350 = torch.constant.int 1
%int0_351 = torch.constant.int 0
%1403 = torch.prim.ListConstruct %int0_345, %int0_346 : (!torch.int, !torch.int) -> !torch.list<int>
%1404 = torch.prim.ListConstruct %int1_347, %int1_348 : (!torch.int, !torch.int) -> !torch.list<int>
%1405 = torch.prim.ListConstruct %int1_349, %int1_350 : (!torch.int, !torch.int) -> !torch.list<int>
%1406 = torch.prim.ListConstruct %int0_351, %int0_351 : (!torch.int, !torch.int) -> !torch.list<int>
%false_352 = torch.constant.bool false
%int1_353 = torch.constant.int 1
%1407 = torch.aten.convolution %1253, %1390, %1402, %1405, %1403, %1404, %false_352, %1406, %int1_353 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1024,512,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1408 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_354 = torch.constant.int 12
%1410 = torch.aten.item %1408 : !torch.vtensor<[],f32> -> !torch.float
%1411 = torch.aten.item %1409 : !torch.vtensor<[],si8> -> !torch.int
%1412 = torch.aten.quantize_per_tensor %1407, %1410, %1411, %int12_354 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1413 = torch.aten.int_repr %1412 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1414 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1415 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1416 = torch.aten.item %1414 : !torch.vtensor<[],f32> -> !torch.float
%1417 = torch.aten.item %1415 : !torch.vtensor<[],si8> -> !torch.int
%1418 = torch.aten._make_per_tensor_quantized_tensor %1413, %1416, %1417 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1419 = torch.aten.dequantize.self %1418 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
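    // Branch merge: main path %1378 plus projected shortcut %1419, then ReLU and
    // requantization at scale 2^-7.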
%int1_355 = torch.constant.int 1
%1420 = torch.aten.add.Tensor %1378, %1419, %int1_355 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1421 = torch.aten.relu %1420 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%1422 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_356 = torch.constant.int 12
%1424 = torch.aten.item %1422 : !torch.vtensor<[],f32> -> !torch.float
%1425 = torch.aten.item %1423 : !torch.vtensor<[],si8> -> !torch.int
%1426 = torch.aten.quantize_per_tensor %1421, %1424, %1425, %int12_356 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1427 = torch.aten.int_repr %1426 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1428 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1430 = torch.aten.item %1428 : !torch.vtensor<[],f32> -> !torch.float
%1431 = torch.aten.item %1429 : !torch.vtensor<[],si8> -> !torch.int
%1432 = torch.aten._make_per_tensor_quantized_tensor %1427, %1430, %1431 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1433 = torch.aten.dequantize.self %1432 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
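    // Next bottleneck (weights %56-%60): a 1x1 1024 -> 256 reduction follows, and its
    // 3x3 convolution further down uses dilation (see the comment before it).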
%1434 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_357 = torch.constant.int 12
%1436 = torch.aten.item %1434 : !torch.vtensor<[],f32> -> !torch.float
%1437 = torch.aten.item %1435 : !torch.vtensor<[],si8> -> !torch.int
%1438 = torch.aten.quantize_per_tensor %56, %1436, %1437, %int12_357 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1439 = torch.aten.int_repr %1438 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1440 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1442 = torch.aten.item %1440 : !torch.vtensor<[],f32> -> !torch.float
%1443 = torch.aten.item %1441 : !torch.vtensor<[],si8> -> !torch.int
%1444 = torch.aten._make_per_tensor_quantized_tensor %1439, %1442, %1443 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1445 = torch.aten.dequantize.self %1444 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1446 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_358 = torch.constant.int 12
%1448 = torch.aten.item %1446 : !torch.vtensor<[],f32> -> !torch.float
%1449 = torch.aten.item %1447 : !torch.vtensor<[],si8> -> !torch.int
%1450 = torch.aten.quantize_per_tensor %57, %1448, %1449, %int12_358 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1451 = torch.aten.int_repr %1450 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1452 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1454 = torch.aten.item %1452 : !torch.vtensor<[],f32> -> !torch.float
%1455 = torch.aten.item %1453 : !torch.vtensor<[],si8> -> !torch.int
%1456 = torch.aten._make_per_tensor_quantized_tensor %1451, %1454, %1455 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1457 = torch.aten.dequantize.self %1456 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_359 = torch.constant.int 0
%int0_360 = torch.constant.int 0
%int1_361 = torch.constant.int 1
%int1_362 = torch.constant.int 1
%int1_363 = torch.constant.int 1
%int1_364 = torch.constant.int 1
%int0_365 = torch.constant.int 0
%1458 = torch.prim.ListConstruct %int0_359, %int0_360 : (!torch.int, !torch.int) -> !torch.list<int>
%1459 = torch.prim.ListConstruct %int1_361, %int1_362 : (!torch.int, !torch.int) -> !torch.list<int>
%1460 = torch.prim.ListConstruct %int1_363, %int1_364 : (!torch.int, !torch.int) -> !torch.list<int>
%1461 = torch.prim.ListConstruct %int0_365, %int0_365 : (!torch.int, !torch.int) -> !torch.list<int>
%false_366 = torch.constant.bool false
%int1_367 = torch.constant.int 1
%1462 = torch.aten.convolution %1433, %1445, %1457, %1460, %1458, %1459, %false_366, %1461, %int1_367 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1463 = torch.aten.relu %1462 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1464 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_368 = torch.constant.int 12
%1466 = torch.aten.item %1464 : !torch.vtensor<[],f32> -> !torch.float
%1467 = torch.aten.item %1465 : !torch.vtensor<[],si8> -> !torch.int
%1468 = torch.aten.quantize_per_tensor %1463, %1466, %1467, %int12_368 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1469 = torch.aten.int_repr %1468 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1470 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1472 = torch.aten.item %1470 : !torch.vtensor<[],f32> -> !torch.float
%1473 = torch.aten.item %1471 : !torch.vtensor<[],si8> -> !torch.int
%1474 = torch.aten._make_per_tensor_quantized_tensor %1469, %1472, %1473 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1475 = torch.aten.dequantize.self %1474 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1476 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_369 = torch.constant.int 12
%1478 = torch.aten.item %1476 : !torch.vtensor<[],f32> -> !torch.float
%1479 = torch.aten.item %1477 : !torch.vtensor<[],si8> -> !torch.int
%1480 = torch.aten.quantize_per_tensor %58, %1478, %1479, %int12_369 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1481 = torch.aten.int_repr %1480 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1482 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1483 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1484 = torch.aten.item %1482 : !torch.vtensor<[],f32> -> !torch.float
%1485 = torch.aten.item %1483 : !torch.vtensor<[],si8> -> !torch.int
%1486 = torch.aten._make_per_tensor_quantized_tensor %1481, %1484, %1485 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1487 = torch.aten.dequantize.self %1486 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1488 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1489 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_370 = torch.constant.int 12
%1490 = torch.aten.item %1488 : !torch.vtensor<[],f32> -> !torch.float
%1491 = torch.aten.item %1489 : !torch.vtensor<[],si8> -> !torch.int
%1492 = torch.aten.quantize_per_tensor %59, %1490, %1491, %int12_370 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1493 = torch.aten.int_repr %1492 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1494 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1495 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1496 = torch.aten.item %1494 : !torch.vtensor<[],f32> -> !torch.float
%1497 = torch.aten.item %1495 : !torch.vtensor<[],si8> -> !torch.int
%1498 = torch.aten._make_per_tensor_quantized_tensor %1493, %1496, %1497 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1499 = torch.aten.dequantize.self %1498 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
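    // The 3x3 convolution below uses stride [1,1] with padding [2,2] and dilation [2,2]:
    // an atrous convolution that keeps the 28x28 spatial size. Reading this as a
    // dilated-ResNet backbone for dense prediction is an inference from the IR, not
    // something the IR states.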
%int2_371 = torch.constant.int 2
%int2_372 = torch.constant.int 2
%int2_373 = torch.constant.int 2
%int2_374 = torch.constant.int 2
%int1_375 = torch.constant.int 1
%int1_376 = torch.constant.int 1
%int0_377 = torch.constant.int 0
%1500 = torch.prim.ListConstruct %int2_371, %int2_372 : (!torch.int, !torch.int) -> !torch.list<int>
%1501 = torch.prim.ListConstruct %int2_373, %int2_374 : (!torch.int, !torch.int) -> !torch.list<int>
%1502 = torch.prim.ListConstruct %int1_375, %int1_376 : (!torch.int, !torch.int) -> !torch.list<int>
%1503 = torch.prim.ListConstruct %int0_377, %int0_377 : (!torch.int, !torch.int) -> !torch.list<int>
%false_378 = torch.constant.bool false
%int1_379 = torch.constant.int 1
%1504 = torch.aten.convolution %1475, %1487, %1499, %1502, %1500, %1501, %false_378, %1503, %int1_379 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1505 = torch.aten.relu %1504 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1506 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1507 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_380 = torch.constant.int 12
%1508 = torch.aten.item %1506 : !torch.vtensor<[],f32> -> !torch.float
%1509 = torch.aten.item %1507 : !torch.vtensor<[],si8> -> !torch.int
%1510 = torch.aten.quantize_per_tensor %1505, %1508, %1509, %int12_380 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1511 = torch.aten.int_repr %1510 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1512 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1514 = torch.aten.item %1512 : !torch.vtensor<[],f32> -> !torch.float
%1515 = torch.aten.item %1513 : !torch.vtensor<[],si8> -> !torch.int
%1516 = torch.aten._make_per_tensor_quantized_tensor %1511, %1514, %1515 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1517 = torch.aten.dequantize.self %1516 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
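    // Activation requantized after the ReLU at scale 7.812500e-03 (2^-7). The
    // next two stanzas quantize the 1x1 expansion weight %60 (1024x256x1x1) and
    // its bias %61 ahead of the pointwise conv.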
%1518 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_381 = torch.constant.int 12
%1520 = torch.aten.item %1518 : !torch.vtensor<[],f32> -> !torch.float
%1521 = torch.aten.item %1519 : !torch.vtensor<[],si8> -> !torch.int
%1522 = torch.aten.quantize_per_tensor %60, %1520, %1521, %int12_381 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1523 = torch.aten.int_repr %1522 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1524 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1525 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1526 = torch.aten.item %1524 : !torch.vtensor<[],f32> -> !torch.float
%1527 = torch.aten.item %1525 : !torch.vtensor<[],si8> -> !torch.int
%1528 = torch.aten._make_per_tensor_quantized_tensor %1523, %1526, %1527 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1529 = torch.aten.dequantize.self %1528 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1530 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1531 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_382 = torch.constant.int 12
%1532 = torch.aten.item %1530 : !torch.vtensor<[],f32> -> !torch.float
%1533 = torch.aten.item %1531 : !torch.vtensor<[],si8> -> !torch.int
%1534 = torch.aten.quantize_per_tensor %61, %1532, %1533, %int12_382 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1535 = torch.aten.int_repr %1534 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1536 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1537 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1538 = torch.aten.item %1536 : !torch.vtensor<[],f32> -> !torch.float
%1539 = torch.aten.item %1537 : !torch.vtensor<[],si8> -> !torch.int
%1540 = torch.aten._make_per_tensor_quantized_tensor %1535, %1538, %1539 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1541 = torch.aten.dequantize.self %1540 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
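    // Pointwise 1x1 conv, 256 -> 1024 channels: stride [1,1], padding [0,0],
    // dilation [1,1], groups 1.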
%int0_383 = torch.constant.int 0
%int0_384 = torch.constant.int 0
%int1_385 = torch.constant.int 1
%int1_386 = torch.constant.int 1
%int1_387 = torch.constant.int 1
%int1_388 = torch.constant.int 1
%int0_389 = torch.constant.int 0
%1542 = torch.prim.ListConstruct %int0_383, %int0_384 : (!torch.int, !torch.int) -> !torch.list<int>
%1543 = torch.prim.ListConstruct %int1_385, %int1_386 : (!torch.int, !torch.int) -> !torch.list<int>
%1544 = torch.prim.ListConstruct %int1_387, %int1_388 : (!torch.int, !torch.int) -> !torch.list<int>
%1545 = torch.prim.ListConstruct %int0_389, %int0_389 : (!torch.int, !torch.int) -> !torch.list<int>
%false_390 = torch.constant.bool false
%int1_391 = torch.constant.int 1
%1546 = torch.aten.convolution %1517, %1529, %1541, %1544, %1542, %1543, %false_390, %1545, %int1_391 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1547 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1548 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_392 = torch.constant.int 12
%1549 = torch.aten.item %1547 : !torch.vtensor<[],f32> -> !torch.float
%1550 = torch.aten.item %1548 : !torch.vtensor<[],si8> -> !torch.int
%1551 = torch.aten.quantize_per_tensor %1546, %1549, %1550, %int12_392 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1552 = torch.aten.int_repr %1551 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1553 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1554 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1555 = torch.aten.item %1553 : !torch.vtensor<[],f32> -> !torch.float
%1556 = torch.aten.item %1554 : !torch.vtensor<[],si8> -> !torch.int
%1557 = torch.aten._make_per_tensor_quantized_tensor %1552, %1555, %1556 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1558 = torch.aten.dequantize.self %1557 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
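    // Residual join of the bottleneck: add the skip-connection tensor %1433 to
    // the conv output (alpha = %int1_393), then ReLU.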
%int1_393 = torch.constant.int 1
%1559 = torch.aten.add.Tensor %1558, %1433, %int1_393 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1560 = torch.aten.relu %1559 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%1561 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1562 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_394 = torch.constant.int 12
%1563 = torch.aten.item %1561 : !torch.vtensor<[],f32> -> !torch.float
%1564 = torch.aten.item %1562 : !torch.vtensor<[],si8> -> !torch.int
%1565 = torch.aten.quantize_per_tensor %1560, %1563, %1564, %int12_394 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1566 = torch.aten.int_repr %1565 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1567 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1568 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1569 = torch.aten.item %1567 : !torch.vtensor<[],f32> -> !torch.float
%1570 = torch.aten.item %1568 : !torch.vtensor<[],si8> -> !torch.int
%1571 = torch.aten._make_per_tensor_quantized_tensor %1566, %1569, %1570 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1572 = torch.aten.dequantize.self %1571 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
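    // End of this bottleneck. The same QDQ -> conv -> ReLU pattern repeats for
    // the next one: 1x1 reduce (weights %62/%63, 1024 -> 256 channels), dilated
    // 3x3 (%64/%65), 1x1 expand (%66/%67), residual add with %1572.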
    %1573 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1574 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_395 = torch.constant.int 12
%1575 = torch.aten.item %1573 : !torch.vtensor<[],f32> -> !torch.float
%1576 = torch.aten.item %1574 : !torch.vtensor<[],si8> -> !torch.int
%1577 = torch.aten.quantize_per_tensor %62, %1575, %1576, %int12_395 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1578 = torch.aten.int_repr %1577 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
    %1579 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1580 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1581 = torch.aten.item %1579 : !torch.vtensor<[],f32> -> !torch.float
%1582 = torch.aten.item %1580 : !torch.vtensor<[],si8> -> !torch.int
%1583 = torch.aten._make_per_tensor_quantized_tensor %1578, %1581, %1582 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1584 = torch.aten.dequantize.self %1583 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1585 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1586 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_396 = torch.constant.int 12
%1587 = torch.aten.item %1585 : !torch.vtensor<[],f32> -> !torch.float
%1588 = torch.aten.item %1586 : !torch.vtensor<[],si8> -> !torch.int
%1589 = torch.aten.quantize_per_tensor %63, %1587, %1588, %int12_396 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1590 = torch.aten.int_repr %1589 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1591 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1592 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1593 = torch.aten.item %1591 : !torch.vtensor<[],f32> -> !torch.float
%1594 = torch.aten.item %1592 : !torch.vtensor<[],si8> -> !torch.int
%1595 = torch.aten._make_per_tensor_quantized_tensor %1590, %1593, %1594 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1596 = torch.aten.dequantize.self %1595 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_397 = torch.constant.int 0
%int0_398 = torch.constant.int 0
%int1_399 = torch.constant.int 1
%int1_400 = torch.constant.int 1
%int1_401 = torch.constant.int 1
%int1_402 = torch.constant.int 1
%int0_403 = torch.constant.int 0
%1597 = torch.prim.ListConstruct %int0_397, %int0_398 : (!torch.int, !torch.int) -> !torch.list<int>
%1598 = torch.prim.ListConstruct %int1_399, %int1_400 : (!torch.int, !torch.int) -> !torch.list<int>
%1599 = torch.prim.ListConstruct %int1_401, %int1_402 : (!torch.int, !torch.int) -> !torch.list<int>
%1600 = torch.prim.ListConstruct %int0_403, %int0_403 : (!torch.int, !torch.int) -> !torch.list<int>
%false_404 = torch.constant.bool false
%int1_405 = torch.constant.int 1
%1601 = torch.aten.convolution %1572, %1584, %1596, %1599, %1597, %1598, %false_404, %1600, %int1_405 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1602 = torch.aten.relu %1601 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1603 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1604 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_406 = torch.constant.int 12
%1605 = torch.aten.item %1603 : !torch.vtensor<[],f32> -> !torch.float
%1606 = torch.aten.item %1604 : !torch.vtensor<[],si8> -> !torch.int
%1607 = torch.aten.quantize_per_tensor %1602, %1605, %1606, %int12_406 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1608 = torch.aten.int_repr %1607 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1609 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1610 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1611 = torch.aten.item %1609 : !torch.vtensor<[],f32> -> !torch.float
%1612 = torch.aten.item %1610 : !torch.vtensor<[],si8> -> !torch.int
%1613 = torch.aten._make_per_tensor_quantized_tensor %1608, %1611, %1612 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1614 = torch.aten.dequantize.self %1613 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1615 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1616 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_407 = torch.constant.int 12
%1617 = torch.aten.item %1615 : !torch.vtensor<[],f32> -> !torch.float
%1618 = torch.aten.item %1616 : !torch.vtensor<[],si8> -> !torch.int
%1619 = torch.aten.quantize_per_tensor %64, %1617, %1618, %int12_407 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1620 = torch.aten.int_repr %1619 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1621 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1622 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1623 = torch.aten.item %1621 : !torch.vtensor<[],f32> -> !torch.float
%1624 = torch.aten.item %1622 : !torch.vtensor<[],si8> -> !torch.int
%1625 = torch.aten._make_per_tensor_quantized_tensor %1620, %1623, %1624 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1626 = torch.aten.dequantize.self %1625 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1627 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1628 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_408 = torch.constant.int 12
%1629 = torch.aten.item %1627 : !torch.vtensor<[],f32> -> !torch.float
%1630 = torch.aten.item %1628 : !torch.vtensor<[],si8> -> !torch.int
%1631 = torch.aten.quantize_per_tensor %65, %1629, %1630, %int12_408 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1632 = torch.aten.int_repr %1631 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1633 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1634 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1635 = torch.aten.item %1633 : !torch.vtensor<[],f32> -> !torch.float
%1636 = torch.aten.item %1634 : !torch.vtensor<[],si8> -> !torch.int
%1637 = torch.aten._make_per_tensor_quantized_tensor %1632, %1635, %1636 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1638 = torch.aten.dequantize.self %1637 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_409 = torch.constant.int 2
%int2_410 = torch.constant.int 2
%int2_411 = torch.constant.int 2
%int2_412 = torch.constant.int 2
%int1_413 = torch.constant.int 1
%int1_414 = torch.constant.int 1
%int0_415 = torch.constant.int 0
%1639 = torch.prim.ListConstruct %int2_409, %int2_410 : (!torch.int, !torch.int) -> !torch.list<int>
%1640 = torch.prim.ListConstruct %int2_411, %int2_412 : (!torch.int, !torch.int) -> !torch.list<int>
%1641 = torch.prim.ListConstruct %int1_413, %int1_414 : (!torch.int, !torch.int) -> !torch.list<int>
%1642 = torch.prim.ListConstruct %int0_415, %int0_415 : (!torch.int, !torch.int) -> !torch.list<int>
%false_416 = torch.constant.bool false
%int1_417 = torch.constant.int 1
%1643 = torch.aten.convolution %1614, %1626, %1638, %1641, %1639, %1640, %false_416, %1642, %int1_417 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1644 = torch.aten.relu %1643 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1645 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_418 = torch.constant.int 12
%1647 = torch.aten.item %1645 : !torch.vtensor<[],f32> -> !torch.float
%1648 = torch.aten.item %1646 : !torch.vtensor<[],si8> -> !torch.int
%1649 = torch.aten.quantize_per_tensor %1644, %1647, %1648, %int12_418 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1650 = torch.aten.int_repr %1649 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1651 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1653 = torch.aten.item %1651 : !torch.vtensor<[],f32> -> !torch.float
%1654 = torch.aten.item %1652 : !torch.vtensor<[],si8> -> !torch.int
%1655 = torch.aten._make_per_tensor_quantized_tensor %1650, %1653, %1654 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1656 = torch.aten.dequantize.self %1655 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1657 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_419 = torch.constant.int 12
%1659 = torch.aten.item %1657 : !torch.vtensor<[],f32> -> !torch.float
%1660 = torch.aten.item %1658 : !torch.vtensor<[],si8> -> !torch.int
%1661 = torch.aten.quantize_per_tensor %66, %1659, %1660, %int12_419 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1662 = torch.aten.int_repr %1661 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1663 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1664 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1665 = torch.aten.item %1663 : !torch.vtensor<[],f32> -> !torch.float
%1666 = torch.aten.item %1664 : !torch.vtensor<[],si8> -> !torch.int
%1667 = torch.aten._make_per_tensor_quantized_tensor %1662, %1665, %1666 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1668 = torch.aten.dequantize.self %1667 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
    %1669 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1670 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_420 = torch.constant.int 12
%1671 = torch.aten.item %1669 : !torch.vtensor<[],f32> -> !torch.float
%1672 = torch.aten.item %1670 : !torch.vtensor<[],si8> -> !torch.int
%1673 = torch.aten.quantize_per_tensor %67, %1671, %1672, %int12_420 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1674 = torch.aten.int_repr %1673 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
    %1675 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1677 = torch.aten.item %1675 : !torch.vtensor<[],f32> -> !torch.float
%1678 = torch.aten.item %1676 : !torch.vtensor<[],si8> -> !torch.int
%1679 = torch.aten._make_per_tensor_quantized_tensor %1674, %1677, %1678 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1680 = torch.aten.dequantize.self %1679 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_421 = torch.constant.int 0
%int0_422 = torch.constant.int 0
%int1_423 = torch.constant.int 1
%int1_424 = torch.constant.int 1
%int1_425 = torch.constant.int 1
%int1_426 = torch.constant.int 1
%int0_427 = torch.constant.int 0
%1681 = torch.prim.ListConstruct %int0_421, %int0_422 : (!torch.int, !torch.int) -> !torch.list<int>
%1682 = torch.prim.ListConstruct %int1_423, %int1_424 : (!torch.int, !torch.int) -> !torch.list<int>
%1683 = torch.prim.ListConstruct %int1_425, %int1_426 : (!torch.int, !torch.int) -> !torch.list<int>
%1684 = torch.prim.ListConstruct %int0_427, %int0_427 : (!torch.int, !torch.int) -> !torch.list<int>
%false_428 = torch.constant.bool false
%int1_429 = torch.constant.int 1
%1685 = torch.aten.convolution %1656, %1668, %1680, %1683, %1681, %1682, %false_428, %1684, %int1_429 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1686 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1687 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_430 = torch.constant.int 12
%1688 = torch.aten.item %1686 : !torch.vtensor<[],f32> -> !torch.float
%1689 = torch.aten.item %1687 : !torch.vtensor<[],si8> -> !torch.int
%1690 = torch.aten.quantize_per_tensor %1685, %1688, %1689, %int12_430 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1691 = torch.aten.int_repr %1690 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1692 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1693 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1694 = torch.aten.item %1692 : !torch.vtensor<[],f32> -> !torch.float
%1695 = torch.aten.item %1693 : !torch.vtensor<[],si8> -> !torch.int
%1696 = torch.aten._make_per_tensor_quantized_tensor %1691, %1694, %1695 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1697 = torch.aten.dequantize.self %1696 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
%int1_431 = torch.constant.int 1
%1698 = torch.aten.add.Tensor %1697, %1572, %int1_431 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1699 = torch.aten.relu %1698 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%1700 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1701 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_432 = torch.constant.int 12
%1702 = torch.aten.item %1700 : !torch.vtensor<[],f32> -> !torch.float
%1703 = torch.aten.item %1701 : !torch.vtensor<[],si8> -> !torch.int
%1704 = torch.aten.quantize_per_tensor %1699, %1702, %1703, %int12_432 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1705 = torch.aten.int_repr %1704 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1706 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1707 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1708 = torch.aten.item %1706 : !torch.vtensor<[],f32> -> !torch.float
%1709 = torch.aten.item %1707 : !torch.vtensor<[],si8> -> !torch.int
%1710 = torch.aten._make_per_tensor_quantized_tensor %1705, %1708, %1709 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1711 = torch.aten.dequantize.self %1710 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
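    // Next bottleneck, identical structure: 1x1 reduce (%68/%69), dilated 3x3
    // (%70/%71), 1x1 expand (%72/%73), residual add with %1711.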
    %1712 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1713 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_433 = torch.constant.int 12
%1714 = torch.aten.item %1712 : !torch.vtensor<[],f32> -> !torch.float
%1715 = torch.aten.item %1713 : !torch.vtensor<[],si8> -> !torch.int
%1716 = torch.aten.quantize_per_tensor %68, %1714, %1715, %int12_433 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1717 = torch.aten.int_repr %1716 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
    %1718 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1719 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1720 = torch.aten.item %1718 : !torch.vtensor<[],f32> -> !torch.float
%1721 = torch.aten.item %1719 : !torch.vtensor<[],si8> -> !torch.int
%1722 = torch.aten._make_per_tensor_quantized_tensor %1717, %1720, %1721 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1723 = torch.aten.dequantize.self %1722 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
    %1724 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1725 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_434 = torch.constant.int 12
%1726 = torch.aten.item %1724 : !torch.vtensor<[],f32> -> !torch.float
%1727 = torch.aten.item %1725 : !torch.vtensor<[],si8> -> !torch.int
%1728 = torch.aten.quantize_per_tensor %69, %1726, %1727, %int12_434 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1729 = torch.aten.int_repr %1728 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
    %1730 = torch.vtensor.literal(dense<1.953125e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1731 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1732 = torch.aten.item %1730 : !torch.vtensor<[],f32> -> !torch.float
%1733 = torch.aten.item %1731 : !torch.vtensor<[],si8> -> !torch.int
%1734 = torch.aten._make_per_tensor_quantized_tensor %1729, %1732, %1733 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1735 = torch.aten.dequantize.self %1734 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_435 = torch.constant.int 0
%int0_436 = torch.constant.int 0
%int1_437 = torch.constant.int 1
%int1_438 = torch.constant.int 1
%int1_439 = torch.constant.int 1
%int1_440 = torch.constant.int 1
%int0_441 = torch.constant.int 0
%1736 = torch.prim.ListConstruct %int0_435, %int0_436 : (!torch.int, !torch.int) -> !torch.list<int>
%1737 = torch.prim.ListConstruct %int1_437, %int1_438 : (!torch.int, !torch.int) -> !torch.list<int>
%1738 = torch.prim.ListConstruct %int1_439, %int1_440 : (!torch.int, !torch.int) -> !torch.list<int>
%1739 = torch.prim.ListConstruct %int0_441, %int0_441 : (!torch.int, !torch.int) -> !torch.list<int>
%false_442 = torch.constant.bool false
%int1_443 = torch.constant.int 1
%1740 = torch.aten.convolution %1711, %1723, %1735, %1738, %1736, %1737, %false_442, %1739, %int1_443 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1741 = torch.aten.relu %1740 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1742 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1743 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_444 = torch.constant.int 12
%1744 = torch.aten.item %1742 : !torch.vtensor<[],f32> -> !torch.float
%1745 = torch.aten.item %1743 : !torch.vtensor<[],si8> -> !torch.int
%1746 = torch.aten.quantize_per_tensor %1741, %1744, %1745, %int12_444 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1747 = torch.aten.int_repr %1746 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1748 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1749 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1750 = torch.aten.item %1748 : !torch.vtensor<[],f32> -> !torch.float
%1751 = torch.aten.item %1749 : !torch.vtensor<[],si8> -> !torch.int
%1752 = torch.aten._make_per_tensor_quantized_tensor %1747, %1750, %1751 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1753 = torch.aten.dequantize.self %1752 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1754 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1755 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_445 = torch.constant.int 12
%1756 = torch.aten.item %1754 : !torch.vtensor<[],f32> -> !torch.float
%1757 = torch.aten.item %1755 : !torch.vtensor<[],si8> -> !torch.int
%1758 = torch.aten.quantize_per_tensor %70, %1756, %1757, %int12_445 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1759 = torch.aten.int_repr %1758 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1760 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1761 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1762 = torch.aten.item %1760 : !torch.vtensor<[],f32> -> !torch.float
%1763 = torch.aten.item %1761 : !torch.vtensor<[],si8> -> !torch.int
%1764 = torch.aten._make_per_tensor_quantized_tensor %1759, %1762, %1763 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1765 = torch.aten.dequantize.self %1764 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1766 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1767 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_446 = torch.constant.int 12
%1768 = torch.aten.item %1766 : !torch.vtensor<[],f32> -> !torch.float
%1769 = torch.aten.item %1767 : !torch.vtensor<[],si8> -> !torch.int
%1770 = torch.aten.quantize_per_tensor %71, %1768, %1769, %int12_446 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1771 = torch.aten.int_repr %1770 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1772 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1773 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1774 = torch.aten.item %1772 : !torch.vtensor<[],f32> -> !torch.float
%1775 = torch.aten.item %1773 : !torch.vtensor<[],si8> -> !torch.int
%1776 = torch.aten._make_per_tensor_quantized_tensor %1771, %1774, %1775 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1777 = torch.aten.dequantize.self %1776 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_447 = torch.constant.int 2
%int2_448 = torch.constant.int 2
%int2_449 = torch.constant.int 2
%int2_450 = torch.constant.int 2
%int1_451 = torch.constant.int 1
%int1_452 = torch.constant.int 1
%int0_453 = torch.constant.int 0
%1778 = torch.prim.ListConstruct %int2_447, %int2_448 : (!torch.int, !torch.int) -> !torch.list<int>
%1779 = torch.prim.ListConstruct %int2_449, %int2_450 : (!torch.int, !torch.int) -> !torch.list<int>
%1780 = torch.prim.ListConstruct %int1_451, %int1_452 : (!torch.int, !torch.int) -> !torch.list<int>
%1781 = torch.prim.ListConstruct %int0_453, %int0_453 : (!torch.int, !torch.int) -> !torch.list<int>
%false_454 = torch.constant.bool false
%int1_455 = torch.constant.int 1
%1782 = torch.aten.convolution %1753, %1765, %1777, %1780, %1778, %1779, %false_454, %1781, %int1_455 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1783 = torch.aten.relu %1782 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1784 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1785 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_456 = torch.constant.int 12
%1786 = torch.aten.item %1784 : !torch.vtensor<[],f32> -> !torch.float
%1787 = torch.aten.item %1785 : !torch.vtensor<[],si8> -> !torch.int
%1788 = torch.aten.quantize_per_tensor %1783, %1786, %1787, %int12_456 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1789 = torch.aten.int_repr %1788 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1790 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1791 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1792 = torch.aten.item %1790 : !torch.vtensor<[],f32> -> !torch.float
%1793 = torch.aten.item %1791 : !torch.vtensor<[],si8> -> !torch.int
%1794 = torch.aten._make_per_tensor_quantized_tensor %1789, %1792, %1793 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1795 = torch.aten.dequantize.self %1794 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1796 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1797 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_457 = torch.constant.int 12
%1798 = torch.aten.item %1796 : !torch.vtensor<[],f32> -> !torch.float
%1799 = torch.aten.item %1797 : !torch.vtensor<[],si8> -> !torch.int
%1800 = torch.aten.quantize_per_tensor %72, %1798, %1799, %int12_457 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1801 = torch.aten.int_repr %1800 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1802 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1803 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1804 = torch.aten.item %1802 : !torch.vtensor<[],f32> -> !torch.float
%1805 = torch.aten.item %1803 : !torch.vtensor<[],si8> -> !torch.int
%1806 = torch.aten._make_per_tensor_quantized_tensor %1801, %1804, %1805 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1807 = torch.aten.dequantize.self %1806 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1808 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1809 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_458 = torch.constant.int 12
%1810 = torch.aten.item %1808 : !torch.vtensor<[],f32> -> !torch.float
%1811 = torch.aten.item %1809 : !torch.vtensor<[],si8> -> !torch.int
%1812 = torch.aten.quantize_per_tensor %73, %1810, %1811, %int12_458 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1813 = torch.aten.int_repr %1812 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1814 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1815 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1816 = torch.aten.item %1814 : !torch.vtensor<[],f32> -> !torch.float
%1817 = torch.aten.item %1815 : !torch.vtensor<[],si8> -> !torch.int
%1818 = torch.aten._make_per_tensor_quantized_tensor %1813, %1816, %1817 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1819 = torch.aten.dequantize.self %1818 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_459 = torch.constant.int 0
%int0_460 = torch.constant.int 0
%int1_461 = torch.constant.int 1
%int1_462 = torch.constant.int 1
%int1_463 = torch.constant.int 1
%int1_464 = torch.constant.int 1
%int0_465 = torch.constant.int 0
%1820 = torch.prim.ListConstruct %int0_459, %int0_460 : (!torch.int, !torch.int) -> !torch.list<int>
%1821 = torch.prim.ListConstruct %int1_461, %int1_462 : (!torch.int, !torch.int) -> !torch.list<int>
%1822 = torch.prim.ListConstruct %int1_463, %int1_464 : (!torch.int, !torch.int) -> !torch.list<int>
%1823 = torch.prim.ListConstruct %int0_465, %int0_465 : (!torch.int, !torch.int) -> !torch.list<int>
%false_466 = torch.constant.bool false
%int1_467 = torch.constant.int 1
%1824 = torch.aten.convolution %1795, %1807, %1819, %1822, %1820, %1821, %false_466, %1823, %int1_467 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1825 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_468 = torch.constant.int 12
%1827 = torch.aten.item %1825 : !torch.vtensor<[],f32> -> !torch.float
%1828 = torch.aten.item %1826 : !torch.vtensor<[],si8> -> !torch.int
%1829 = torch.aten.quantize_per_tensor %1824, %1827, %1828, %int12_468 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1830 = torch.aten.int_repr %1829 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1831 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1833 = torch.aten.item %1831 : !torch.vtensor<[],f32> -> !torch.float
%1834 = torch.aten.item %1832 : !torch.vtensor<[],si8> -> !torch.int
%1835 = torch.aten._make_per_tensor_quantized_tensor %1830, %1833, %1834 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1836 = torch.aten.dequantize.self %1835 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
%int1_469 = torch.constant.int 1
%1837 = torch.aten.add.Tensor %1836, %1711, %int1_469 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1838 = torch.aten.relu %1837 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%1839 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1840 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_470 = torch.constant.int 12
%1841 = torch.aten.item %1839 : !torch.vtensor<[],f32> -> !torch.float
%1842 = torch.aten.item %1840 : !torch.vtensor<[],si8> -> !torch.int
%1843 = torch.aten.quantize_per_tensor %1838, %1841, %1842, %int12_470 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1844 = torch.aten.int_repr %1843 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1845 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1846 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1847 = torch.aten.item %1845 : !torch.vtensor<[],f32> -> !torch.float
%1848 = torch.aten.item %1846 : !torch.vtensor<[],si8> -> !torch.int
%1849 = torch.aten._make_per_tensor_quantized_tensor %1844, %1847, %1848 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1850 = torch.aten.dequantize.self %1849 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
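    // Next bottleneck, same shape again: 1x1 reduce (%74/%75), dilated 3x3
    // (%76/%77), 1x1 expand (%78/%79), residual add with %1850.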
%1851 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1852 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_471 = torch.constant.int 12
%1853 = torch.aten.item %1851 : !torch.vtensor<[],f32> -> !torch.float
%1854 = torch.aten.item %1852 : !torch.vtensor<[],si8> -> !torch.int
%1855 = torch.aten.quantize_per_tensor %74, %1853, %1854, %int12_471 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1856 = torch.aten.int_repr %1855 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1857 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1858 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1859 = torch.aten.item %1857 : !torch.vtensor<[],f32> -> !torch.float
%1860 = torch.aten.item %1858 : !torch.vtensor<[],si8> -> !torch.int
%1861 = torch.aten._make_per_tensor_quantized_tensor %1856, %1859, %1860 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1862 = torch.aten.dequantize.self %1861 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1863 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1864 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_472 = torch.constant.int 12
%1865 = torch.aten.item %1863 : !torch.vtensor<[],f32> -> !torch.float
%1866 = torch.aten.item %1864 : !torch.vtensor<[],si8> -> !torch.int
%1867 = torch.aten.quantize_per_tensor %75, %1865, %1866, %int12_472 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1868 = torch.aten.int_repr %1867 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1869 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1870 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1871 = torch.aten.item %1869 : !torch.vtensor<[],f32> -> !torch.float
%1872 = torch.aten.item %1870 : !torch.vtensor<[],si8> -> !torch.int
%1873 = torch.aten._make_per_tensor_quantized_tensor %1868, %1871, %1872 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1874 = torch.aten.dequantize.self %1873 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_473 = torch.constant.int 0
%int0_474 = torch.constant.int 0
%int1_475 = torch.constant.int 1
%int1_476 = torch.constant.int 1
%int1_477 = torch.constant.int 1
%int1_478 = torch.constant.int 1
%int0_479 = torch.constant.int 0
%1875 = torch.prim.ListConstruct %int0_473, %int0_474 : (!torch.int, !torch.int) -> !torch.list<int>
%1876 = torch.prim.ListConstruct %int1_475, %int1_476 : (!torch.int, !torch.int) -> !torch.list<int>
%1877 = torch.prim.ListConstruct %int1_477, %int1_478 : (!torch.int, !torch.int) -> !torch.list<int>
%1878 = torch.prim.ListConstruct %int0_479, %int0_479 : (!torch.int, !torch.int) -> !torch.list<int>
%false_480 = torch.constant.bool false
%int1_481 = torch.constant.int 1
%1879 = torch.aten.convolution %1850, %1862, %1874, %1877, %1875, %1876, %false_480, %1878, %int1_481 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1880 = torch.aten.relu %1879 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1881 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1882 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_482 = torch.constant.int 12
%1883 = torch.aten.item %1881 : !torch.vtensor<[],f32> -> !torch.float
%1884 = torch.aten.item %1882 : !torch.vtensor<[],si8> -> !torch.int
%1885 = torch.aten.quantize_per_tensor %1880, %1883, %1884, %int12_482 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1886 = torch.aten.int_repr %1885 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1887 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1888 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1889 = torch.aten.item %1887 : !torch.vtensor<[],f32> -> !torch.float
%1890 = torch.aten.item %1888 : !torch.vtensor<[],si8> -> !torch.int
%1891 = torch.aten._make_per_tensor_quantized_tensor %1886, %1889, %1890 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1892 = torch.aten.dequantize.self %1891 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1893 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1894 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_483 = torch.constant.int 12
%1895 = torch.aten.item %1893 : !torch.vtensor<[],f32> -> !torch.float
%1896 = torch.aten.item %1894 : !torch.vtensor<[],si8> -> !torch.int
%1897 = torch.aten.quantize_per_tensor %76, %1895, %1896, %int12_483 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1898 = torch.aten.int_repr %1897 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1899 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1900 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1901 = torch.aten.item %1899 : !torch.vtensor<[],f32> -> !torch.float
%1902 = torch.aten.item %1900 : !torch.vtensor<[],si8> -> !torch.int
%1903 = torch.aten._make_per_tensor_quantized_tensor %1898, %1901, %1902 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1904 = torch.aten.dequantize.self %1903 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1905 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1906 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_484 = torch.constant.int 12
%1907 = torch.aten.item %1905 : !torch.vtensor<[],f32> -> !torch.float
%1908 = torch.aten.item %1906 : !torch.vtensor<[],si8> -> !torch.int
%1909 = torch.aten.quantize_per_tensor %77, %1907, %1908, %int12_484 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1910 = torch.aten.int_repr %1909 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1911 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1913 = torch.aten.item %1911 : !torch.vtensor<[],f32> -> !torch.float
%1914 = torch.aten.item %1912 : !torch.vtensor<[],si8> -> !torch.int
%1915 = torch.aten._make_per_tensor_quantized_tensor %1910, %1913, %1914 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1916 = torch.aten.dequantize.self %1915 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_485 = torch.constant.int 2
%int2_486 = torch.constant.int 2
%int2_487 = torch.constant.int 2
%int2_488 = torch.constant.int 2
%int1_489 = torch.constant.int 1
%int1_490 = torch.constant.int 1
%int0_491 = torch.constant.int 0
%1917 = torch.prim.ListConstruct %int2_485, %int2_486 : (!torch.int, !torch.int) -> !torch.list<int>
%1918 = torch.prim.ListConstruct %int2_487, %int2_488 : (!torch.int, !torch.int) -> !torch.list<int>
%1919 = torch.prim.ListConstruct %int1_489, %int1_490 : (!torch.int, !torch.int) -> !torch.list<int>
%1920 = torch.prim.ListConstruct %int0_491, %int0_491 : (!torch.int, !torch.int) -> !torch.list<int>
%false_492 = torch.constant.bool false
%int1_493 = torch.constant.int 1
%1921 = torch.aten.convolution %1892, %1904, %1916, %1919, %1917, %1918, %false_492, %1920, %int1_493 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1922 = torch.aten.relu %1921 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1923 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1924 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_494 = torch.constant.int 12
%1925 = torch.aten.item %1923 : !torch.vtensor<[],f32> -> !torch.float
%1926 = torch.aten.item %1924 : !torch.vtensor<[],si8> -> !torch.int
%1927 = torch.aten.quantize_per_tensor %1922, %1925, %1926, %int12_494 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1928 = torch.aten.int_repr %1927 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1929 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1930 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1931 = torch.aten.item %1929 : !torch.vtensor<[],f32> -> !torch.float
%1932 = torch.aten.item %1930 : !torch.vtensor<[],si8> -> !torch.int
%1933 = torch.aten._make_per_tensor_quantized_tensor %1928, %1931, %1932 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1934 = torch.aten.dequantize.self %1933 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1935 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1936 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_495 = torch.constant.int 12
%1937 = torch.aten.item %1935 : !torch.vtensor<[],f32> -> !torch.float
%1938 = torch.aten.item %1936 : !torch.vtensor<[],si8> -> !torch.int
%1939 = torch.aten.quantize_per_tensor %78, %1937, %1938, %int12_495 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1940 = torch.aten.int_repr %1939 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1941 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1942 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1943 = torch.aten.item %1941 : !torch.vtensor<[],f32> -> !torch.float
%1944 = torch.aten.item %1942 : !torch.vtensor<[],si8> -> !torch.int
%1945 = torch.aten._make_per_tensor_quantized_tensor %1940, %1943, %1944 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1946 = torch.aten.dequantize.self %1945 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1947 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1948 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_496 = torch.constant.int 12
%1949 = torch.aten.item %1947 : !torch.vtensor<[],f32> -> !torch.float
%1950 = torch.aten.item %1948 : !torch.vtensor<[],si8> -> !torch.int
%1951 = torch.aten.quantize_per_tensor %79, %1949, %1950, %int12_496 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1952 = torch.aten.int_repr %1951 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1953 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1954 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1955 = torch.aten.item %1953 : !torch.vtensor<[],f32> -> !torch.float
%1956 = torch.aten.item %1954 : !torch.vtensor<[],si8> -> !torch.int
%1957 = torch.aten._make_per_tensor_quantized_tensor %1952, %1955, %1956 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1958 = torch.aten.dequantize.self %1957 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_497 = torch.constant.int 0
%int0_498 = torch.constant.int 0
%int1_499 = torch.constant.int 1
%int1_500 = torch.constant.int 1
%int1_501 = torch.constant.int 1
%int1_502 = torch.constant.int 1
%int0_503 = torch.constant.int 0
%1959 = torch.prim.ListConstruct %int0_497, %int0_498 : (!torch.int, !torch.int) -> !torch.list<int>
%1960 = torch.prim.ListConstruct %int1_499, %int1_500 : (!torch.int, !torch.int) -> !torch.list<int>
%1961 = torch.prim.ListConstruct %int1_501, %int1_502 : (!torch.int, !torch.int) -> !torch.list<int>
%1962 = torch.prim.ListConstruct %int0_503, %int0_503 : (!torch.int, !torch.int) -> !torch.list<int>
%false_504 = torch.constant.bool false
%int1_505 = torch.constant.int 1
%1963 = torch.aten.convolution %1934, %1946, %1958, %1961, %1959, %1960, %false_504, %1962, %int1_505 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1964 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1965 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_506 = torch.constant.int 12
%1966 = torch.aten.item %1964 : !torch.vtensor<[],f32> -> !torch.float
%1967 = torch.aten.item %1965 : !torch.vtensor<[],si8> -> !torch.int
%1968 = torch.aten.quantize_per_tensor %1963, %1966, %1967, %int12_506 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1969 = torch.aten.int_repr %1968 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1970 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1971 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1972 = torch.aten.item %1970 : !torch.vtensor<[],f32> -> !torch.float
%1973 = torch.aten.item %1971 : !torch.vtensor<[],si8> -> !torch.int
%1974 = torch.aten._make_per_tensor_quantized_tensor %1969, %1972, %1973 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1975 = torch.aten.dequantize.self %1974 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
%int1_507 = torch.constant.int 1
%1976 = torch.aten.add.Tensor %1975, %1850, %int1_507 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%1977 = torch.aten.relu %1976 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%1978 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1979 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_508 = torch.constant.int 12
%1980 = torch.aten.item %1978 : !torch.vtensor<[],f32> -> !torch.float
%1981 = torch.aten.item %1979 : !torch.vtensor<[],si8> -> !torch.int
%1982 = torch.aten.quantize_per_tensor %1977, %1980, %1981, %int12_508 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1983 = torch.aten.int_repr %1982 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%1984 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1985 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1986 = torch.aten.item %1984 : !torch.vtensor<[],f32> -> !torch.float
%1987 = torch.aten.item %1985 : !torch.vtensor<[],si8> -> !torch.int
%1988 = torch.aten._make_per_tensor_quantized_tensor %1983, %1986, %1987 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%1989 = torch.aten.dequantize.self %1988 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
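// QDQ of the next unit's 1x1 weight %80 ([256,1024,1,1]) and bias %81; weights and biases get the same round-trip as activations.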
%1990 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1991 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_509 = torch.constant.int 12
%1992 = torch.aten.item %1990 : !torch.vtensor<[],f32> -> !torch.float
%1993 = torch.aten.item %1991 : !torch.vtensor<[],si8> -> !torch.int
%1994 = torch.aten.quantize_per_tensor %80, %1992, %1993, %int12_509 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1995 = torch.aten.int_repr %1994 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1996 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1997 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1998 = torch.aten.item %1996 : !torch.vtensor<[],f32> -> !torch.float
%1999 = torch.aten.item %1997 : !torch.vtensor<[],si8> -> !torch.int
%2000 = torch.aten._make_per_tensor_quantized_tensor %1995, %1998, %1999 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%2001 = torch.aten.dequantize.self %2000 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%2002 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2003 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_510 = torch.constant.int 12
%2004 = torch.aten.item %2002 : !torch.vtensor<[],f32> -> !torch.float
%2005 = torch.aten.item %2003 : !torch.vtensor<[],si8> -> !torch.int
%2006 = torch.aten.quantize_per_tensor %81, %2004, %2005, %int12_510 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2007 = torch.aten.int_repr %2006 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2008 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2009 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2010 = torch.aten.item %2008 : !torch.vtensor<[],f32> -> !torch.float
%2011 = torch.aten.item %2009 : !torch.vtensor<[],si8> -> !torch.int
%2012 = torch.aten._make_per_tensor_quantized_tensor %2007, %2010, %2011 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2013 = torch.aten.dequantize.self %2012 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
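// 1x1 reduce convolution (1024 -> 256) + ReLU.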
%int0_511 = torch.constant.int 0
%int0_512 = torch.constant.int 0
%int1_513 = torch.constant.int 1
%int1_514 = torch.constant.int 1
%int1_515 = torch.constant.int 1
%int1_516 = torch.constant.int 1
%int0_517 = torch.constant.int 0
%2014 = torch.prim.ListConstruct %int0_511, %int0_512 : (!torch.int, !torch.int) -> !torch.list<int>
%2015 = torch.prim.ListConstruct %int1_513, %int1_514 : (!torch.int, !torch.int) -> !torch.list<int>
%2016 = torch.prim.ListConstruct %int1_515, %int1_516 : (!torch.int, !torch.int) -> !torch.list<int>
%2017 = torch.prim.ListConstruct %int0_517, %int0_517 : (!torch.int, !torch.int) -> !torch.list<int>
%false_518 = torch.constant.bool false
%int1_519 = torch.constant.int 1
%2018 = torch.aten.convolution %1989, %2001, %2013, %2016, %2014, %2015, %false_518, %2017, %int1_519 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2019 = torch.aten.relu %2018 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2020 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2021 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_520 = torch.constant.int 12
%2022 = torch.aten.item %2020 : !torch.vtensor<[],f32> -> !torch.float
%2023 = torch.aten.item %2021 : !torch.vtensor<[],si8> -> !torch.int
%2024 = torch.aten.quantize_per_tensor %2019, %2022, %2023, %int12_520 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2025 = torch.aten.int_repr %2024 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%2026 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2027 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2028 = torch.aten.item %2026 : !torch.vtensor<[],f32> -> !torch.float
%2029 = torch.aten.item %2027 : !torch.vtensor<[],si8> -> !torch.int
%2030 = torch.aten._make_per_tensor_quantized_tensor %2025, %2028, %2029 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2031 = torch.aten.dequantize.self %2030 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%2032 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2033 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_521 = torch.constant.int 12
%2034 = torch.aten.item %2032 : !torch.vtensor<[],f32> -> !torch.float
%2035 = torch.aten.item %2033 : !torch.vtensor<[],si8> -> !torch.int
%2036 = torch.aten.quantize_per_tensor %82, %2034, %2035, %int12_521 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2037 = torch.aten.int_repr %2036 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%2038 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2039 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2040 = torch.aten.item %2038 : !torch.vtensor<[],f32> -> !torch.float
%2041 = torch.aten.item %2039 : !torch.vtensor<[],si8> -> !torch.int
%2042 = torch.aten._make_per_tensor_quantized_tensor %2037, %2040, %2041 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2043 = torch.aten.dequantize.self %2042 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%2044 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2045 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_522 = torch.constant.int 12
%2046 = torch.aten.item %2044 : !torch.vtensor<[],f32> -> !torch.float
%2047 = torch.aten.item %2045 : !torch.vtensor<[],si8> -> !torch.int
%2048 = torch.aten.quantize_per_tensor %83, %2046, %2047, %int12_522 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2049 = torch.aten.int_repr %2048 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2050 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2051 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2052 = torch.aten.item %2050 : !torch.vtensor<[],f32> -> !torch.float
%2053 = torch.aten.item %2051 : !torch.vtensor<[],si8> -> !torch.int
%2054 = torch.aten._make_per_tensor_quantized_tensor %2049, %2052, %2053 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2055 = torch.aten.dequantize.self %2054 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
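// 3x3 convolution with padding [2,2] and dilation [2,2] (atrous), preserving the 28x28 spatial size;
// dilation in place of striding here is typical of a DeepLab-style segmentation backbone.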
%int2_523 = torch.constant.int 2
%int2_524 = torch.constant.int 2
%int2_525 = torch.constant.int 2
%int2_526 = torch.constant.int 2
%int1_527 = torch.constant.int 1
%int1_528 = torch.constant.int 1
%int0_529 = torch.constant.int 0
%2056 = torch.prim.ListConstruct %int2_523, %int2_524 : (!torch.int, !torch.int) -> !torch.list<int>
%2057 = torch.prim.ListConstruct %int2_525, %int2_526 : (!torch.int, !torch.int) -> !torch.list<int>
%2058 = torch.prim.ListConstruct %int1_527, %int1_528 : (!torch.int, !torch.int) -> !torch.list<int>
%2059 = torch.prim.ListConstruct %int0_529, %int0_529 : (!torch.int, !torch.int) -> !torch.list<int>
%false_530 = torch.constant.bool false
%int1_531 = torch.constant.int 1
%2060 = torch.aten.convolution %2031, %2043, %2055, %2058, %2056, %2057, %false_530, %2059, %int1_531 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2061 = torch.aten.relu %2060 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2062 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2063 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_532 = torch.constant.int 12
%2064 = torch.aten.item %2062 : !torch.vtensor<[],f32> -> !torch.float
%2065 = torch.aten.item %2063 : !torch.vtensor<[],si8> -> !torch.int
%2066 = torch.aten.quantize_per_tensor %2061, %2064, %2065, %int12_532 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2067 = torch.aten.int_repr %2066 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%2068 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2070 = torch.aten.item %2068 : !torch.vtensor<[],f32> -> !torch.float
%2071 = torch.aten.item %2069 : !torch.vtensor<[],si8> -> !torch.int
%2072 = torch.aten._make_per_tensor_quantized_tensor %2067, %2070, %2071 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2073 = torch.aten.dequantize.self %2072 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%2074 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_533 = torch.constant.int 12
%2076 = torch.aten.item %2074 : !torch.vtensor<[],f32> -> !torch.float
%2077 = torch.aten.item %2075 : !torch.vtensor<[],si8> -> !torch.int
%2078 = torch.aten.quantize_per_tensor %84, %2076, %2077, %int12_533 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%2079 = torch.aten.int_repr %2078 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%2080 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2082 = torch.aten.item %2080 : !torch.vtensor<[],f32> -> !torch.float
%2083 = torch.aten.item %2081 : !torch.vtensor<[],si8> -> !torch.int
%2084 = torch.aten._make_per_tensor_quantized_tensor %2079, %2082, %2083 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%2085 = torch.aten.dequantize.self %2084 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%2086 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_534 = torch.constant.int 12
%2088 = torch.aten.item %2086 : !torch.vtensor<[],f32> -> !torch.float
%2089 = torch.aten.item %2087 : !torch.vtensor<[],si8> -> !torch.int
%2090 = torch.aten.quantize_per_tensor %85, %2088, %2089, %int12_534 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%2091 = torch.aten.int_repr %2090 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%2092 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2093 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2094 = torch.aten.item %2092 : !torch.vtensor<[],f32> -> !torch.float
%2095 = torch.aten.item %2093 : !torch.vtensor<[],si8> -> !torch.int
%2096 = torch.aten._make_per_tensor_quantized_tensor %2091, %2094, %2095 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%2097 = torch.aten.dequantize.self %2096 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
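// 1x1 expand convolution (256 -> 1024); note there is no ReLU here -- it comes after the residual add.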
%int0_535 = torch.constant.int 0
%int0_536 = torch.constant.int 0
%int1_537 = torch.constant.int 1
%int1_538 = torch.constant.int 1
%int1_539 = torch.constant.int 1
%int1_540 = torch.constant.int 1
%int0_541 = torch.constant.int 0
%2098 = torch.prim.ListConstruct %int0_535, %int0_536 : (!torch.int, !torch.int) -> !torch.list<int>
%2099 = torch.prim.ListConstruct %int1_537, %int1_538 : (!torch.int, !torch.int) -> !torch.list<int>
%2100 = torch.prim.ListConstruct %int1_539, %int1_540 : (!torch.int, !torch.int) -> !torch.list<int>
%2101 = torch.prim.ListConstruct %int0_541, %int0_541 : (!torch.int, !torch.int) -> !torch.list<int>
%false_542 = torch.constant.bool false
%int1_543 = torch.constant.int 1
%2102 = torch.aten.convolution %2073, %2085, %2097, %2100, %2098, %2099, %false_542, %2101, %int1_543 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%2103 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_544 = torch.constant.int 12
%2105 = torch.aten.item %2103 : !torch.vtensor<[],f32> -> !torch.float
%2106 = torch.aten.item %2104 : !torch.vtensor<[],si8> -> !torch.int
%2107 = torch.aten.quantize_per_tensor %2102, %2105, %2106, %int12_544 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%2108 = torch.aten.int_repr %2107 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%2109 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2111 = torch.aten.item %2109 : !torch.vtensor<[],f32> -> !torch.float
%2112 = torch.aten.item %2110 : !torch.vtensor<[],si8> -> !torch.int
%2113 = torch.aten._make_per_tensor_quantized_tensor %2108, %2111, %2112 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%2114 = torch.aten.dequantize.self %2113 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
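// Residual add with %1989 and ReLU, then requantization at scale 7.812500e-03 = 1/128.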
%int1_545 = torch.constant.int 1
%2115 = torch.aten.add.Tensor %2114, %1989, %int1_545 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[1,1024,28,28],f32>, !torch.int -> !torch.vtensor<[1,1024,28,28],f32>
%2116 = torch.aten.relu %2115 : !torch.vtensor<[1,1024,28,28],f32> -> !torch.vtensor<[1,1024,28,28],f32>
%2117 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2118 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_546 = torch.constant.int 12
%2119 = torch.aten.item %2117 : !torch.vtensor<[],f32> -> !torch.float
%2120 = torch.aten.item %2118 : !torch.vtensor<[],si8> -> !torch.int
%2121 = torch.aten.quantize_per_tensor %2116, %2119, %2120, %int12_546 : !torch.vtensor<[1,1024,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%2122 = torch.aten.int_repr %2121 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],si8>
%2123 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2124 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2125 = torch.aten.item %2123 : !torch.vtensor<[],f32> -> !torch.float
%2126 = torch.aten.item %2124 : !torch.vtensor<[],si8> -> !torch.int
%2127 = torch.aten._make_per_tensor_quantized_tensor %2122, %2125, %2126 : !torch.vtensor<[1,1024,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,28,28],!torch.qint8>
%2128 = torch.aten.dequantize.self %2127 : !torch.vtensor<[1,1024,28,28],!torch.qint8> -> !torch.vtensor<[1,1024,28,28],f32>
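// New bottleneck with wider channels: QDQ of the [512,1024,1,1] weight %86 and bias %87.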
%2129 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2130 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_547 = torch.constant.int 12
%2131 = torch.aten.item %2129 : !torch.vtensor<[],f32> -> !torch.float
%2132 = torch.aten.item %2130 : !torch.vtensor<[],si8> -> !torch.int
%2133 = torch.aten.quantize_per_tensor %86, %2131, %2132, %int12_547 : !torch.vtensor<[512,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,1024,1,1],!torch.qint8>
%2134 = torch.aten.int_repr %2133 : !torch.vtensor<[512,1024,1,1],!torch.qint8> -> !torch.vtensor<[512,1024,1,1],si8>
%2135 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2136 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2137 = torch.aten.item %2135 : !torch.vtensor<[],f32> -> !torch.float
%2138 = torch.aten.item %2136 : !torch.vtensor<[],si8> -> !torch.int
%2139 = torch.aten._make_per_tensor_quantized_tensor %2134, %2137, %2138 : !torch.vtensor<[512,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,1024,1,1],!torch.qint8>
%2140 = torch.aten.dequantize.self %2139 : !torch.vtensor<[512,1024,1,1],!torch.qint8> -> !torch.vtensor<[512,1024,1,1],f32>
%2141 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2142 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_548 = torch.constant.int 12
%2143 = torch.aten.item %2141 : !torch.vtensor<[],f32> -> !torch.float
%2144 = torch.aten.item %2142 : !torch.vtensor<[],si8> -> !torch.int
%2145 = torch.aten.quantize_per_tensor %87, %2143, %2144, %int12_548 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2146 = torch.aten.int_repr %2145 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2147 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2148 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2149 = torch.aten.item %2147 : !torch.vtensor<[],f32> -> !torch.float
%2150 = torch.aten.item %2148 : !torch.vtensor<[],si8> -> !torch.int
%2151 = torch.aten._make_per_tensor_quantized_tensor %2146, %2149, %2150 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2152 = torch.aten.dequantize.self %2151 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
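// 1x1 reduce convolution (1024 -> 512) + ReLU.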
%int0_549 = torch.constant.int 0
%int0_550 = torch.constant.int 0
%int1_551 = torch.constant.int 1
%int1_552 = torch.constant.int 1
%int1_553 = torch.constant.int 1
%int1_554 = torch.constant.int 1
%int0_555 = torch.constant.int 0
%2153 = torch.prim.ListConstruct %int0_549, %int0_550 : (!torch.int, !torch.int) -> !torch.list<int>
%2154 = torch.prim.ListConstruct %int1_551, %int1_552 : (!torch.int, !torch.int) -> !torch.list<int>
%2155 = torch.prim.ListConstruct %int1_553, %int1_554 : (!torch.int, !torch.int) -> !torch.list<int>
%2156 = torch.prim.ListConstruct %int0_555, %int0_555 : (!torch.int, !torch.int) -> !torch.list<int>
%false_556 = torch.constant.bool false
%int1_557 = torch.constant.int 1
%2157 = torch.aten.convolution %2128, %2140, %2152, %2155, %2153, %2154, %false_556, %2156, %int1_557 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[512,1024,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2158 = torch.aten.relu %2157 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2159 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2160 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_558 = torch.constant.int 12
%2161 = torch.aten.item %2159 : !torch.vtensor<[],f32> -> !torch.float
%2162 = torch.aten.item %2160 : !torch.vtensor<[],si8> -> !torch.int
%2163 = torch.aten.quantize_per_tensor %2158, %2161, %2162, %int12_558 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2164 = torch.aten.int_repr %2163 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2165 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2166 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2167 = torch.aten.item %2165 : !torch.vtensor<[],f32> -> !torch.float
%2168 = torch.aten.item %2166 : !torch.vtensor<[],si8> -> !torch.int
%2169 = torch.aten._make_per_tensor_quantized_tensor %2164, %2167, %2168 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2170 = torch.aten.dequantize.self %2169 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2171 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2172 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_559 = torch.constant.int 12
%2173 = torch.aten.item %2171 : !torch.vtensor<[],f32> -> !torch.float
%2174 = torch.aten.item %2172 : !torch.vtensor<[],si8> -> !torch.int
%2175 = torch.aten.quantize_per_tensor %88, %2173, %2174, %int12_559 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2176 = torch.aten.int_repr %2175 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2177 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2178 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2179 = torch.aten.item %2177 : !torch.vtensor<[],f32> -> !torch.float
%2180 = torch.aten.item %2178 : !torch.vtensor<[],si8> -> !torch.int
%2181 = torch.aten._make_per_tensor_quantized_tensor %2176, %2179, %2180 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2182 = torch.aten.dequantize.self %2181 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2183 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2184 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_560 = torch.constant.int 12
%2185 = torch.aten.item %2183 : !torch.vtensor<[],f32> -> !torch.float
%2186 = torch.aten.item %2184 : !torch.vtensor<[],si8> -> !torch.int
%2187 = torch.aten.quantize_per_tensor %89, %2185, %2186, %int12_560 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2188 = torch.aten.int_repr %2187 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2189 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2190 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2191 = torch.aten.item %2189 : !torch.vtensor<[],f32> -> !torch.float
%2192 = torch.aten.item %2190 : !torch.vtensor<[],si8> -> !torch.int
%2193 = torch.aten._make_per_tensor_quantized_tensor %2188, %2191, %2192 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2194 = torch.aten.dequantize.self %2193 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
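// Dilated 3x3 convolution (padding 2, dilation 2) on 512 channels + ReLU, again keeping 28x28.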
%int2_561 = torch.constant.int 2
%int2_562 = torch.constant.int 2
%int2_563 = torch.constant.int 2
%int2_564 = torch.constant.int 2
%int1_565 = torch.constant.int 1
%int1_566 = torch.constant.int 1
%int0_567 = torch.constant.int 0
%2195 = torch.prim.ListConstruct %int2_561, %int2_562 : (!torch.int, !torch.int) -> !torch.list<int>
%2196 = torch.prim.ListConstruct %int2_563, %int2_564 : (!torch.int, !torch.int) -> !torch.list<int>
%2197 = torch.prim.ListConstruct %int1_565, %int1_566 : (!torch.int, !torch.int) -> !torch.list<int>
%2198 = torch.prim.ListConstruct %int0_567, %int0_567 : (!torch.int, !torch.int) -> !torch.list<int>
%false_568 = torch.constant.bool false
%int1_569 = torch.constant.int 1
%2199 = torch.aten.convolution %2170, %2182, %2194, %2197, %2195, %2196, %false_568, %2198, %int1_569 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2200 = torch.aten.relu %2199 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2201 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2202 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_570 = torch.constant.int 12
%2203 = torch.aten.item %2201 : !torch.vtensor<[],f32> -> !torch.float
%2204 = torch.aten.item %2202 : !torch.vtensor<[],si8> -> !torch.int
%2205 = torch.aten.quantize_per_tensor %2200, %2203, %2204, %int12_570 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2206 = torch.aten.int_repr %2205 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2207 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2208 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2209 = torch.aten.item %2207 : !torch.vtensor<[],f32> -> !torch.float
%2210 = torch.aten.item %2208 : !torch.vtensor<[],si8> -> !torch.int
%2211 = torch.aten._make_per_tensor_quantized_tensor %2206, %2209, %2210 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2212 = torch.aten.dequantize.self %2211 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2213 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2214 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_571 = torch.constant.int 12
%2215 = torch.aten.item %2213 : !torch.vtensor<[],f32> -> !torch.float
%2216 = torch.aten.item %2214 : !torch.vtensor<[],si8> -> !torch.int
%2217 = torch.aten.quantize_per_tensor %90, %2215, %2216, %int12_571 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2218 = torch.aten.int_repr %2217 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2219 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2220 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2221 = torch.aten.item %2219 : !torch.vtensor<[],f32> -> !torch.float
%2222 = torch.aten.item %2220 : !torch.vtensor<[],si8> -> !torch.int
%2223 = torch.aten._make_per_tensor_quantized_tensor %2218, %2221, %2222 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2224 = torch.aten.dequantize.self %2223 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2225 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2226 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_572 = torch.constant.int 12
%2227 = torch.aten.item %2225 : !torch.vtensor<[],f32> -> !torch.float
%2228 = torch.aten.item %2226 : !torch.vtensor<[],si8> -> !torch.int
%2229 = torch.aten.quantize_per_tensor %91, %2227, %2228, %int12_572 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2230 = torch.aten.int_repr %2229 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2231 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2232 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2233 = torch.aten.item %2231 : !torch.vtensor<[],f32> -> !torch.float
%2234 = torch.aten.item %2232 : !torch.vtensor<[],si8> -> !torch.int
%2235 = torch.aten._make_per_tensor_quantized_tensor %2230, %2233, %2234 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2236 = torch.aten.dequantize.self %2235 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
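// 1x1 expand convolution (512 -> 2048); ReLU is deferred until after the residual add.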
%int0_573 = torch.constant.int 0
%int0_574 = torch.constant.int 0
%int1_575 = torch.constant.int 1
%int1_576 = torch.constant.int 1
%int1_577 = torch.constant.int 1
%int1_578 = torch.constant.int 1
%int0_579 = torch.constant.int 0
%2237 = torch.prim.ListConstruct %int0_573, %int0_574 : (!torch.int, !torch.int) -> !torch.list<int>
%2238 = torch.prim.ListConstruct %int1_575, %int1_576 : (!torch.int, !torch.int) -> !torch.list<int>
%2239 = torch.prim.ListConstruct %int1_577, %int1_578 : (!torch.int, !torch.int) -> !torch.list<int>
%2240 = torch.prim.ListConstruct %int0_579, %int0_579 : (!torch.int, !torch.int) -> !torch.list<int>
%false_580 = torch.constant.bool false
%int1_581 = torch.constant.int 1
%2241 = torch.aten.convolution %2212, %2224, %2236, %2239, %2237, %2238, %false_580, %2240, %int1_581 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2242 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2243 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_582 = torch.constant.int 12
%2244 = torch.aten.item %2242 : !torch.vtensor<[],f32> -> !torch.float
%2245 = torch.aten.item %2243 : !torch.vtensor<[],si8> -> !torch.int
%2246 = torch.aten.quantize_per_tensor %2241, %2244, %2245, %int12_582 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2247 = torch.aten.int_repr %2246 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2248 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2249 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2250 = torch.aten.item %2248 : !torch.vtensor<[],f32> -> !torch.float
%2251 = torch.aten.item %2249 : !torch.vtensor<[],si8> -> !torch.int
%2252 = torch.aten._make_per_tensor_quantized_tensor %2247, %2250, %2251 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2253 = torch.aten.dequantize.self %2252 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
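// Projection shortcut: QDQ of the [2048,1024,1,1] weight %92 and bias %93, then a 1x1 conv on %2128
// so the skip path matches the 2048-channel main path.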
%2254 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_583 = torch.constant.int 12
%2256 = torch.aten.item %2254 : !torch.vtensor<[],f32> -> !torch.float
%2257 = torch.aten.item %2255 : !torch.vtensor<[],si8> -> !torch.int
%2258 = torch.aten.quantize_per_tensor %92, %2256, %2257, %int12_583 : !torch.vtensor<[2048,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,1024,1,1],!torch.qint8>
%2259 = torch.aten.int_repr %2258 : !torch.vtensor<[2048,1024,1,1],!torch.qint8> -> !torch.vtensor<[2048,1024,1,1],si8>
%2260 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2262 = torch.aten.item %2260 : !torch.vtensor<[],f32> -> !torch.float
%2263 = torch.aten.item %2261 : !torch.vtensor<[],si8> -> !torch.int
%2264 = torch.aten._make_per_tensor_quantized_tensor %2259, %2262, %2263 : !torch.vtensor<[2048,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,1024,1,1],!torch.qint8>
%2265 = torch.aten.dequantize.self %2264 : !torch.vtensor<[2048,1024,1,1],!torch.qint8> -> !torch.vtensor<[2048,1024,1,1],f32>
%2266 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_584 = torch.constant.int 12
%2268 = torch.aten.item %2266 : !torch.vtensor<[],f32> -> !torch.float
%2269 = torch.aten.item %2267 : !torch.vtensor<[],si8> -> !torch.int
%2270 = torch.aten.quantize_per_tensor %93, %2268, %2269, %int12_584 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2271 = torch.aten.int_repr %2270 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2272 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2273 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2274 = torch.aten.item %2272 : !torch.vtensor<[],f32> -> !torch.float
%2275 = torch.aten.item %2273 : !torch.vtensor<[],si8> -> !torch.int
%2276 = torch.aten._make_per_tensor_quantized_tensor %2271, %2274, %2275 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2277 = torch.aten.dequantize.self %2276 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
%int0_585 = torch.constant.int 0
%int0_586 = torch.constant.int 0
%int1_587 = torch.constant.int 1
%int1_588 = torch.constant.int 1
%int1_589 = torch.constant.int 1
%int1_590 = torch.constant.int 1
%int0_591 = torch.constant.int 0
%2278 = torch.prim.ListConstruct %int0_585, %int0_586 : (!torch.int, !torch.int) -> !torch.list<int>
%2279 = torch.prim.ListConstruct %int1_587, %int1_588 : (!torch.int, !torch.int) -> !torch.list<int>
%2280 = torch.prim.ListConstruct %int1_589, %int1_590 : (!torch.int, !torch.int) -> !torch.list<int>
%2281 = torch.prim.ListConstruct %int0_591, %int0_591 : (!torch.int, !torch.int) -> !torch.list<int>
%false_592 = torch.constant.bool false
%int1_593 = torch.constant.int 1
%2282 = torch.aten.convolution %2128, %2265, %2277, %2280, %2278, %2279, %false_592, %2281, %int1_593 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[2048,1024,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2283 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_594 = torch.constant.int 12
%2285 = torch.aten.item %2283 : !torch.vtensor<[],f32> -> !torch.float
%2286 = torch.aten.item %2284 : !torch.vtensor<[],si8> -> !torch.int
%2287 = torch.aten.quantize_per_tensor %2282, %2285, %2286, %int12_594 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2288 = torch.aten.int_repr %2287 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2289 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2290 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2291 = torch.aten.item %2289 : !torch.vtensor<[],f32> -> !torch.float
%2292 = torch.aten.item %2290 : !torch.vtensor<[],si8> -> !torch.int
%2293 = torch.aten._make_per_tensor_quantized_tensor %2288, %2291, %2292 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2294 = torch.aten.dequantize.self %2293 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
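// Add the main path %2253 and the projected shortcut %2294, ReLU, then requantize.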
%int1_595 = torch.constant.int 1
%2295 = torch.aten.add.Tensor %2253, %2294, %int1_595 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[1,2048,28,28],f32>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2296 = torch.aten.relu %2295 : !torch.vtensor<[1,2048,28,28],f32> -> !torch.vtensor<[1,2048,28,28],f32>
%2297 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2298 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_596 = torch.constant.int 12
%2299 = torch.aten.item %2297 : !torch.vtensor<[],f32> -> !torch.float
%2300 = torch.aten.item %2298 : !torch.vtensor<[],si8> -> !torch.int
%2301 = torch.aten.quantize_per_tensor %2296, %2299, %2300, %int12_596 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2302 = torch.aten.int_repr %2301 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2303 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2305 = torch.aten.item %2303 : !torch.vtensor<[],f32> -> !torch.float
%2306 = torch.aten.item %2304 : !torch.vtensor<[],si8> -> !torch.int
%2307 = torch.aten._make_per_tensor_quantized_tensor %2302, %2305, %2306 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2308 = torch.aten.dequantize.self %2307 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
%2309 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_597 = torch.constant.int 12
%2311 = torch.aten.item %2309 : !torch.vtensor<[],f32> -> !torch.float
%2312 = torch.aten.item %2310 : !torch.vtensor<[],si8> -> !torch.int
%2313 = torch.aten.quantize_per_tensor %94, %2311, %2312, %int12_597 : !torch.vtensor<[512,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2314 = torch.aten.int_repr %2313 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],si8>
%2315 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2316 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2317 = torch.aten.item %2315 : !torch.vtensor<[],f32> -> !torch.float
%2318 = torch.aten.item %2316 : !torch.vtensor<[],si8> -> !torch.int
%2319 = torch.aten._make_per_tensor_quantized_tensor %2314, %2317, %2318 : !torch.vtensor<[512,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2320 = torch.aten.dequantize.self %2319 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],f32>
%2321 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2322 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_598 = torch.constant.int 12
%2323 = torch.aten.item %2321 : !torch.vtensor<[],f32> -> !torch.float
%2324 = torch.aten.item %2322 : !torch.vtensor<[],si8> -> !torch.int
%2325 = torch.aten.quantize_per_tensor %95, %2323, %2324, %int12_598 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2326 = torch.aten.int_repr %2325 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2327 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2329 = torch.aten.item %2327 : !torch.vtensor<[],f32> -> !torch.float
%2330 = torch.aten.item %2328 : !torch.vtensor<[],si8> -> !torch.int
%2331 = torch.aten._make_per_tensor_quantized_tensor %2326, %2329, %2330 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2332 = torch.aten.dequantize.self %2331 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
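// 1x1 reduce convolution (2048 -> 512) + ReLU.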
%int0_599 = torch.constant.int 0
%int0_600 = torch.constant.int 0
%int1_601 = torch.constant.int 1
%int1_602 = torch.constant.int 1
%int1_603 = torch.constant.int 1
%int1_604 = torch.constant.int 1
%int0_605 = torch.constant.int 0
%2333 = torch.prim.ListConstruct %int0_599, %int0_600 : (!torch.int, !torch.int) -> !torch.list<int>
%2334 = torch.prim.ListConstruct %int1_601, %int1_602 : (!torch.int, !torch.int) -> !torch.list<int>
%2335 = torch.prim.ListConstruct %int1_603, %int1_604 : (!torch.int, !torch.int) -> !torch.list<int>
%2336 = torch.prim.ListConstruct %int0_605, %int0_605 : (!torch.int, !torch.int) -> !torch.list<int>
%false_606 = torch.constant.bool false
%int1_607 = torch.constant.int 1
%2337 = torch.aten.convolution %2308, %2320, %2332, %2335, %2333, %2334, %false_606, %2336, %int1_607 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[512,2048,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2338 = torch.aten.relu %2337 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2339 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_608 = torch.constant.int 12
%2341 = torch.aten.item %2339 : !torch.vtensor<[],f32> -> !torch.float
%2342 = torch.aten.item %2340 : !torch.vtensor<[],si8> -> !torch.int
%2343 = torch.aten.quantize_per_tensor %2338, %2341, %2342, %int12_608 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2344 = torch.aten.int_repr %2343 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2345 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2347 = torch.aten.item %2345 : !torch.vtensor<[],f32> -> !torch.float
%2348 = torch.aten.item %2346 : !torch.vtensor<[],si8> -> !torch.int
%2349 = torch.aten._make_per_tensor_quantized_tensor %2344, %2347, %2348 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2350 = torch.aten.dequantize.self %2349 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2351 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2352 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_609 = torch.constant.int 12
%2353 = torch.aten.item %2351 : !torch.vtensor<[],f32> -> !torch.float
%2354 = torch.aten.item %2352 : !torch.vtensor<[],si8> -> !torch.int
%2355 = torch.aten.quantize_per_tensor %96, %2353, %2354, %int12_609 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2356 = torch.aten.int_repr %2355 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2357 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2358 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2359 = torch.aten.item %2357 : !torch.vtensor<[],f32> -> !torch.float
%2360 = torch.aten.item %2358 : !torch.vtensor<[],si8> -> !torch.int
%2361 = torch.aten._make_per_tensor_quantized_tensor %2356, %2359, %2360 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2362 = torch.aten.dequantize.self %2361 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2363 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2364 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_610 = torch.constant.int 12
%2365 = torch.aten.item %2363 : !torch.vtensor<[],f32> -> !torch.float
%2366 = torch.aten.item %2364 : !torch.vtensor<[],si8> -> !torch.int
%2367 = torch.aten.quantize_per_tensor %97, %2365, %2366, %int12_610 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2368 = torch.aten.int_repr %2367 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2369 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2370 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2371 = torch.aten.item %2369 : !torch.vtensor<[],f32> -> !torch.float
%2372 = torch.aten.item %2370 : !torch.vtensor<[],si8> -> !torch.int
%2373 = torch.aten._make_per_tensor_quantized_tensor %2368, %2371, %2372 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2374 = torch.aten.dequantize.self %2373 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
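// 3x3 convolution with padding [4,4] and dilation [4,4]: a wider atrous rate, still preserving 28x28.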
%int4 = torch.constant.int 4
%int4_611 = torch.constant.int 4
%int4_612 = torch.constant.int 4
%int4_613 = torch.constant.int 4
%int1_614 = torch.constant.int 1
%int1_615 = torch.constant.int 1
%int0_616 = torch.constant.int 0
%2375 = torch.prim.ListConstruct %int4, %int4_611 : (!torch.int, !torch.int) -> !torch.list<int>
%2376 = torch.prim.ListConstruct %int4_612, %int4_613 : (!torch.int, !torch.int) -> !torch.list<int>
%2377 = torch.prim.ListConstruct %int1_614, %int1_615 : (!torch.int, !torch.int) -> !torch.list<int>
%2378 = torch.prim.ListConstruct %int0_616, %int0_616 : (!torch.int, !torch.int) -> !torch.list<int>
%false_617 = torch.constant.bool false
%int1_618 = torch.constant.int 1
%2379 = torch.aten.convolution %2350, %2362, %2374, %2377, %2375, %2376, %false_617, %2378, %int1_618 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2380 = torch.aten.relu %2379 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2381 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2382 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_619 = torch.constant.int 12
%2383 = torch.aten.item %2381 : !torch.vtensor<[],f32> -> !torch.float
%2384 = torch.aten.item %2382 : !torch.vtensor<[],si8> -> !torch.int
%2385 = torch.aten.quantize_per_tensor %2380, %2383, %2384, %int12_619 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2386 = torch.aten.int_repr %2385 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2387 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2388 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2389 = torch.aten.item %2387 : !torch.vtensor<[],f32> -> !torch.float
%2390 = torch.aten.item %2388 : !torch.vtensor<[],si8> -> !torch.int
%2391 = torch.aten._make_per_tensor_quantized_tensor %2386, %2389, %2390 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2392 = torch.aten.dequantize.self %2391 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2393 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2394 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_620 = torch.constant.int 12
%2395 = torch.aten.item %2393 : !torch.vtensor<[],f32> -> !torch.float
%2396 = torch.aten.item %2394 : !torch.vtensor<[],si8> -> !torch.int
%2397 = torch.aten.quantize_per_tensor %98, %2395, %2396, %int12_620 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2398 = torch.aten.int_repr %2397 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2399 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2400 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2401 = torch.aten.item %2399 : !torch.vtensor<[],f32> -> !torch.float
%2402 = torch.aten.item %2400 : !torch.vtensor<[],si8> -> !torch.int
%2403 = torch.aten._make_per_tensor_quantized_tensor %2398, %2401, %2402 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2404 = torch.aten.dequantize.self %2403 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2405 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2406 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_621 = torch.constant.int 12
%2407 = torch.aten.item %2405 : !torch.vtensor<[],f32> -> !torch.float
%2408 = torch.aten.item %2406 : !torch.vtensor<[],si8> -> !torch.int
%2409 = torch.aten.quantize_per_tensor %99, %2407, %2408, %int12_621 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2410 = torch.aten.int_repr %2409 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2411 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2412 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2413 = torch.aten.item %2411 : !torch.vtensor<[],f32> -> !torch.float
%2414 = torch.aten.item %2412 : !torch.vtensor<[],si8> -> !torch.int
%2415 = torch.aten._make_per_tensor_quantized_tensor %2410, %2413, %2414 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2416 = torch.aten.dequantize.self %2415 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
%int0_622 = torch.constant.int 0
%int0_623 = torch.constant.int 0
%int1_624 = torch.constant.int 1
%int1_625 = torch.constant.int 1
%int1_626 = torch.constant.int 1
%int1_627 = torch.constant.int 1
%int0_628 = torch.constant.int 0
%2417 = torch.prim.ListConstruct %int0_622, %int0_623 : (!torch.int, !torch.int) -> !torch.list<int>
%2418 = torch.prim.ListConstruct %int1_624, %int1_625 : (!torch.int, !torch.int) -> !torch.list<int>
%2419 = torch.prim.ListConstruct %int1_626, %int1_627 : (!torch.int, !torch.int) -> !torch.list<int>
%2420 = torch.prim.ListConstruct %int0_628, %int0_628 : (!torch.int, !torch.int) -> !torch.list<int>
%false_629 = torch.constant.bool false
%int1_630 = torch.constant.int 1
%2421 = torch.aten.convolution %2392, %2404, %2416, %2419, %2417, %2418, %false_629, %2420, %int1_630 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2422 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_631 = torch.constant.int 12
%2424 = torch.aten.item %2422 : !torch.vtensor<[],f32> -> !torch.float
%2425 = torch.aten.item %2423 : !torch.vtensor<[],si8> -> !torch.int
%2426 = torch.aten.quantize_per_tensor %2421, %2424, %2425, %int12_631 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2427 = torch.aten.int_repr %2426 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2428 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2430 = torch.aten.item %2428 : !torch.vtensor<[],f32> -> !torch.float
%2431 = torch.aten.item %2429 : !torch.vtensor<[],si8> -> !torch.int
%2432 = torch.aten._make_per_tensor_quantized_tensor %2427, %2430, %2431 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2433 = torch.aten.dequantize.self %2432 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
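// Identity residual add with %2308, then ReLU.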
%int1_632 = torch.constant.int 1
%2434 = torch.aten.add.Tensor %2433, %2308, %int1_632 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[1,2048,28,28],f32>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2435 = torch.aten.relu %2434 : !torch.vtensor<[1,2048,28,28],f32> -> !torch.vtensor<[1,2048,28,28],f32>
%2436 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2437 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_633 = torch.constant.int 12
%2438 = torch.aten.item %2436 : !torch.vtensor<[],f32> -> !torch.float
%2439 = torch.aten.item %2437 : !torch.vtensor<[],si8> -> !torch.int
%2440 = torch.aten.quantize_per_tensor %2435, %2438, %2439, %int12_633 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2441 = torch.aten.int_repr %2440 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2442 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2443 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2444 = torch.aten.item %2442 : !torch.vtensor<[],f32> -> !torch.float
%2445 = torch.aten.item %2443 : !torch.vtensor<[],si8> -> !torch.int
%2446 = torch.aten._make_per_tensor_quantized_tensor %2441, %2444, %2445 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2447 = torch.aten.dequantize.self %2446 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
%2448 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2449 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_634 = torch.constant.int 12
%2450 = torch.aten.item %2448 : !torch.vtensor<[],f32> -> !torch.float
%2451 = torch.aten.item %2449 : !torch.vtensor<[],si8> -> !torch.int
%2452 = torch.aten.quantize_per_tensor %100, %2450, %2451, %int12_634 : !torch.vtensor<[512,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2453 = torch.aten.int_repr %2452 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],si8>
%2454 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2455 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2456 = torch.aten.item %2454 : !torch.vtensor<[],f32> -> !torch.float
%2457 = torch.aten.item %2455 : !torch.vtensor<[],si8> -> !torch.int
%2458 = torch.aten._make_per_tensor_quantized_tensor %2453, %2456, %2457 : !torch.vtensor<[512,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2459 = torch.aten.dequantize.self %2458 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],f32>
%2460 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2461 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_635 = torch.constant.int 12
%2462 = torch.aten.item %2460 : !torch.vtensor<[],f32> -> !torch.float
%2463 = torch.aten.item %2461 : !torch.vtensor<[],si8> -> !torch.int
%2464 = torch.aten.quantize_per_tensor %101, %2462, %2463, %int12_635 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2465 = torch.aten.int_repr %2464 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2466 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2467 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2468 = torch.aten.item %2466 : !torch.vtensor<[],f32> -> !torch.float
%2469 = torch.aten.item %2467 : !torch.vtensor<[],si8> -> !torch.int
%2470 = torch.aten._make_per_tensor_quantized_tensor %2465, %2468, %2469 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2471 = torch.aten.dequantize.self %2470 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
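// 1x1 reduce convolution (2048 -> 512) + ReLU.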
%int0_636 = torch.constant.int 0
%int0_637 = torch.constant.int 0
%int1_638 = torch.constant.int 1
%int1_639 = torch.constant.int 1
%int1_640 = torch.constant.int 1
%int1_641 = torch.constant.int 1
%int0_642 = torch.constant.int 0
%2472 = torch.prim.ListConstruct %int0_636, %int0_637 : (!torch.int, !torch.int) -> !torch.list<int>
%2473 = torch.prim.ListConstruct %int1_638, %int1_639 : (!torch.int, !torch.int) -> !torch.list<int>
%2474 = torch.prim.ListConstruct %int1_640, %int1_641 : (!torch.int, !torch.int) -> !torch.list<int>
%2475 = torch.prim.ListConstruct %int0_642, %int0_642 : (!torch.int, !torch.int) -> !torch.list<int>
%false_643 = torch.constant.bool false
%int1_644 = torch.constant.int 1
%2476 = torch.aten.convolution %2447, %2459, %2471, %2474, %2472, %2473, %false_643, %2475, %int1_644 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[512,2048,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2477 = torch.aten.relu %2476 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2478 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2479 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_645 = torch.constant.int 12
%2480 = torch.aten.item %2478 : !torch.vtensor<[],f32> -> !torch.float
%2481 = torch.aten.item %2479 : !torch.vtensor<[],si8> -> !torch.int
%2482 = torch.aten.quantize_per_tensor %2477, %2480, %2481, %int12_645 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2483 = torch.aten.int_repr %2482 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2484 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2485 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2486 = torch.aten.item %2484 : !torch.vtensor<[],f32> -> !torch.float
%2487 = torch.aten.item %2485 : !torch.vtensor<[],si8> -> !torch.int
%2488 = torch.aten._make_per_tensor_quantized_tensor %2483, %2486, %2487 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2489 = torch.aten.dequantize.self %2488 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2490 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2491 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_646 = torch.constant.int 12
%2492 = torch.aten.item %2490 : !torch.vtensor<[],f32> -> !torch.float
%2493 = torch.aten.item %2491 : !torch.vtensor<[],si8> -> !torch.int
%2494 = torch.aten.quantize_per_tensor %102, %2492, %2493, %int12_646 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2495 = torch.aten.int_repr %2494 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2496 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2497 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2498 = torch.aten.item %2496 : !torch.vtensor<[],f32> -> !torch.float
%2499 = torch.aten.item %2497 : !torch.vtensor<[],si8> -> !torch.int
%2500 = torch.aten._make_per_tensor_quantized_tensor %2495, %2498, %2499 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2501 = torch.aten.dequantize.self %2500 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2502 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2503 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_647 = torch.constant.int 12
%2504 = torch.aten.item %2502 : !torch.vtensor<[],f32> -> !torch.float
%2505 = torch.aten.item %2503 : !torch.vtensor<[],si8> -> !torch.int
%2506 = torch.aten.quantize_per_tensor %103, %2504, %2505, %int12_647 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2507 = torch.aten.int_repr %2506 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2508 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2509 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2510 = torch.aten.item %2508 : !torch.vtensor<[],f32> -> !torch.float
%2511 = torch.aten.item %2509 : !torch.vtensor<[],si8> -> !torch.int
%2512 = torch.aten._make_per_tensor_quantized_tensor %2507, %2510, %2511 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2513 = torch.aten.dequantize.self %2512 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
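// 3x3 atrous conv: [1,512,28,28] -> [1,512,28,28] with padding 4 and dilation 4 (28 + 2*4 - 4*2 = 28, so resolution is preserved), then ReLU.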
%int4_648 = torch.constant.int 4
%int4_649 = torch.constant.int 4
%int4_650 = torch.constant.int 4
%int4_651 = torch.constant.int 4
%int1_652 = torch.constant.int 1
%int1_653 = torch.constant.int 1
%int0_654 = torch.constant.int 0
%2514 = torch.prim.ListConstruct %int4_648, %int4_649 : (!torch.int, !torch.int) -> !torch.list<int>
%2515 = torch.prim.ListConstruct %int4_650, %int4_651 : (!torch.int, !torch.int) -> !torch.list<int>
%2516 = torch.prim.ListConstruct %int1_652, %int1_653 : (!torch.int, !torch.int) -> !torch.list<int>
%2517 = torch.prim.ListConstruct %int0_654, %int0_654 : (!torch.int, !torch.int) -> !torch.list<int>
%false_655 = torch.constant.bool false
%int1_656 = torch.constant.int 1
%2518 = torch.aten.convolution %2489, %2501, %2513, %2516, %2514, %2515, %false_655, %2517, %int1_656 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%2519 = torch.aten.relu %2518 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%2520 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_657 = torch.constant.int 12
%2522 = torch.aten.item %2520 : !torch.vtensor<[],f32> -> !torch.float
%2523 = torch.aten.item %2521 : !torch.vtensor<[],si8> -> !torch.int
%2524 = torch.aten.quantize_per_tensor %2519, %2522, %2523, %int12_657 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2525 = torch.aten.int_repr %2524 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%2526 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2527 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2528 = torch.aten.item %2526 : !torch.vtensor<[],f32> -> !torch.float
%2529 = torch.aten.item %2527 : !torch.vtensor<[],si8> -> !torch.int
%2530 = torch.aten._make_per_tensor_quantized_tensor %2525, %2528, %2529 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%2531 = torch.aten.dequantize.self %2530 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%2532 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2533 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_658 = torch.constant.int 12
%2534 = torch.aten.item %2532 : !torch.vtensor<[],f32> -> !torch.float
%2535 = torch.aten.item %2533 : !torch.vtensor<[],si8> -> !torch.int
%2536 = torch.aten.quantize_per_tensor %104, %2534, %2535, %int12_658 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2537 = torch.aten.int_repr %2536 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2538 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2539 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2540 = torch.aten.item %2538 : !torch.vtensor<[],f32> -> !torch.float
%2541 = torch.aten.item %2539 : !torch.vtensor<[],si8> -> !torch.int
%2542 = torch.aten._make_per_tensor_quantized_tensor %2537, %2540, %2541 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2543 = torch.aten.dequantize.self %2542 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2544 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2545 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_659 = torch.constant.int 12
%2546 = torch.aten.item %2544 : !torch.vtensor<[],f32> -> !torch.float
%2547 = torch.aten.item %2545 : !torch.vtensor<[],si8> -> !torch.int
%2548 = torch.aten.quantize_per_tensor %105, %2546, %2547, %int12_659 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2549 = torch.aten.int_repr %2548 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2550 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2551 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2552 = torch.aten.item %2550 : !torch.vtensor<[],f32> -> !torch.float
%2553 = torch.aten.item %2551 : !torch.vtensor<[],si8> -> !torch.int
%2554 = torch.aten._make_per_tensor_quantized_tensor %2549, %2552, %2553 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2555 = torch.aten.dequantize.self %2554 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
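// 1x1 expansion conv: [1,512,28,28] x [2048,512,1,1] -> [1,2048,28,28], completing the bottleneck body.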
%int0_660 = torch.constant.int 0
%int0_661 = torch.constant.int 0
%int1_662 = torch.constant.int 1
%int1_663 = torch.constant.int 1
%int1_664 = torch.constant.int 1
%int1_665 = torch.constant.int 1
%int0_666 = torch.constant.int 0
%2556 = torch.prim.ListConstruct %int0_660, %int0_661 : (!torch.int, !torch.int) -> !torch.list<int>
%2557 = torch.prim.ListConstruct %int1_662, %int1_663 : (!torch.int, !torch.int) -> !torch.list<int>
%2558 = torch.prim.ListConstruct %int1_664, %int1_665 : (!torch.int, !torch.int) -> !torch.list<int>
%2559 = torch.prim.ListConstruct %int0_666, %int0_666 : (!torch.int, !torch.int) -> !torch.list<int>
%false_667 = torch.constant.bool false
%int1_668 = torch.constant.int 1
%2560 = torch.aten.convolution %2531, %2543, %2555, %2558, %2556, %2557, %false_667, %2559, %int1_668 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2561 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2562 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_669 = torch.constant.int 12
%2563 = torch.aten.item %2561 : !torch.vtensor<[],f32> -> !torch.float
%2564 = torch.aten.item %2562 : !torch.vtensor<[],si8> -> !torch.int
%2565 = torch.aten.quantize_per_tensor %2560, %2563, %2564, %int12_669 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2566 = torch.aten.int_repr %2565 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2567 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2568 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2569 = torch.aten.item %2567 : !torch.vtensor<[],f32> -> !torch.float
%2570 = torch.aten.item %2568 : !torch.vtensor<[],si8> -> !torch.int
%2571 = torch.aten._make_per_tensor_quantized_tensor %2566, %2569, %2570 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2572 = torch.aten.dequantize.self %2571 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
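// Residual connection: add the bottleneck output to its input %2447, then ReLU and requantize.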
%int1_670 = torch.constant.int 1
%2573 = torch.aten.add.Tensor %2572, %2447, %int1_670 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[1,2048,28,28],f32>, !torch.int -> !torch.vtensor<[1,2048,28,28],f32>
%2574 = torch.aten.relu %2573 : !torch.vtensor<[1,2048,28,28],f32> -> !torch.vtensor<[1,2048,28,28],f32>
%2575 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2576 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_671 = torch.constant.int 12
%2577 = torch.aten.item %2575 : !torch.vtensor<[],f32> -> !torch.float
%2578 = torch.aten.item %2576 : !torch.vtensor<[],si8> -> !torch.int
%2579 = torch.aten.quantize_per_tensor %2574, %2577, %2578, %int12_671 : !torch.vtensor<[1,2048,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2580 = torch.aten.int_repr %2579 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],si8>
%2581 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2582 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2583 = torch.aten.item %2581 : !torch.vtensor<[],f32> -> !torch.float
%2584 = torch.aten.item %2582 : !torch.vtensor<[],si8> -> !torch.int
%2585 = torch.aten._make_per_tensor_quantized_tensor %2580, %2583, %2584 : !torch.vtensor<[1,2048,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,28,28],!torch.qint8>
%2586 = torch.aten.dequantize.self %2585 : !torch.vtensor<[1,2048,28,28],!torch.qint8> -> !torch.vtensor<[1,2048,28,28],f32>
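// What follows appears to be a DeepLabV3-style ASPP head over the [1,2048,28,28] backbone features:
// a 1x1 conv branch, three 3x3 atrous branches (dilations 12, 24, 36), and a global-average-pooling
// branch, each producing 256 channels, concatenated further below.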
%2587 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2588 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_672 = torch.constant.int 12
%2589 = torch.aten.item %2587 : !torch.vtensor<[],f32> -> !torch.float
%2590 = torch.aten.item %2588 : !torch.vtensor<[],si8> -> !torch.int
%2591 = torch.aten.quantize_per_tensor %106, %2589, %2590, %int12_672 : !torch.vtensor<[256,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,2048,1,1],!torch.qint8>
%2592 = torch.aten.int_repr %2591 : !torch.vtensor<[256,2048,1,1],!torch.qint8> -> !torch.vtensor<[256,2048,1,1],si8>
%2593 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2594 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2595 = torch.aten.item %2593 : !torch.vtensor<[],f32> -> !torch.float
%2596 = torch.aten.item %2594 : !torch.vtensor<[],si8> -> !torch.int
%2597 = torch.aten._make_per_tensor_quantized_tensor %2592, %2595, %2596 : !torch.vtensor<[256,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,2048,1,1],!torch.qint8>
%2598 = torch.aten.dequantize.self %2597 : !torch.vtensor<[256,2048,1,1],!torch.qint8> -> !torch.vtensor<[256,2048,1,1],f32>
%2599 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2600 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_673 = torch.constant.int 12
%2601 = torch.aten.item %2599 : !torch.vtensor<[],f32> -> !torch.float
%2602 = torch.aten.item %2600 : !torch.vtensor<[],si8> -> !torch.int
%2603 = torch.aten.quantize_per_tensor %107, %2601, %2602, %int12_673 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2604 = torch.aten.int_repr %2603 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2605 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2606 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2607 = torch.aten.item %2605 : !torch.vtensor<[],f32> -> !torch.float
%2608 = torch.aten.item %2606 : !torch.vtensor<[],si8> -> !torch.int
%2609 = torch.aten._make_per_tensor_quantized_tensor %2604, %2607, %2608 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2610 = torch.aten.dequantize.self %2609 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
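// ASPP branch 1: 1x1 conv, [1,2048,28,28] -> [1,256,28,28], then ReLU.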
%int0_674 = torch.constant.int 0
%int0_675 = torch.constant.int 0
%int1_676 = torch.constant.int 1
%int1_677 = torch.constant.int 1
%int1_678 = torch.constant.int 1
%int1_679 = torch.constant.int 1
%int0_680 = torch.constant.int 0
%2611 = torch.prim.ListConstruct %int0_674, %int0_675 : (!torch.int, !torch.int) -> !torch.list<int>
%2612 = torch.prim.ListConstruct %int1_676, %int1_677 : (!torch.int, !torch.int) -> !torch.list<int>
%2613 = torch.prim.ListConstruct %int1_678, %int1_679 : (!torch.int, !torch.int) -> !torch.list<int>
%2614 = torch.prim.ListConstruct %int0_680, %int0_680 : (!torch.int, !torch.int) -> !torch.list<int>
%false_681 = torch.constant.bool false
%int1_682 = torch.constant.int 1
%2615 = torch.aten.convolution %2586, %2598, %2610, %2613, %2611, %2612, %false_681, %2614, %int1_682 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[256,2048,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2616 = torch.aten.relu %2615 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2617 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2618 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_683 = torch.constant.int 12
%2619 = torch.aten.item %2617 : !torch.vtensor<[],f32> -> !torch.float
%2620 = torch.aten.item %2618 : !torch.vtensor<[],si8> -> !torch.int
%2621 = torch.aten.quantize_per_tensor %108, %2619, %2620, %int12_683 : !torch.vtensor<[256,2048,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2622 = torch.aten.int_repr %2621 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],si8>
%2623 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2624 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2625 = torch.aten.item %2623 : !torch.vtensor<[],f32> -> !torch.float
%2626 = torch.aten.item %2624 : !torch.vtensor<[],si8> -> !torch.int
%2627 = torch.aten._make_per_tensor_quantized_tensor %2622, %2625, %2626 : !torch.vtensor<[256,2048,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2628 = torch.aten.dequantize.self %2627 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],f32>
%2629 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2630 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_684 = torch.constant.int 12
%2631 = torch.aten.item %2629 : !torch.vtensor<[],f32> -> !torch.float
%2632 = torch.aten.item %2630 : !torch.vtensor<[],si8> -> !torch.int
%2633 = torch.aten.quantize_per_tensor %109, %2631, %2632, %int12_684 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2634 = torch.aten.int_repr %2633 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2635 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2636 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2637 = torch.aten.item %2635 : !torch.vtensor<[],f32> -> !torch.float
%2638 = torch.aten.item %2636 : !torch.vtensor<[],si8> -> !torch.int
%2639 = torch.aten._make_per_tensor_quantized_tensor %2634, %2637, %2638 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2640 = torch.aten.dequantize.self %2639 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
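// ASPP branch 2: 3x3 atrous conv with padding = dilation = 12, [1,2048,28,28] -> [1,256,28,28], then ReLU.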
%int12_685 = torch.constant.int 12
%int12_686 = torch.constant.int 12
%int12_687 = torch.constant.int 12
%int12_688 = torch.constant.int 12
%int1_689 = torch.constant.int 1
%int1_690 = torch.constant.int 1
%int0_691 = torch.constant.int 0
%2641 = torch.prim.ListConstruct %int12_685, %int12_686 : (!torch.int, !torch.int) -> !torch.list<int>
%2642 = torch.prim.ListConstruct %int12_687, %int12_688 : (!torch.int, !torch.int) -> !torch.list<int>
%2643 = torch.prim.ListConstruct %int1_689, %int1_690 : (!torch.int, !torch.int) -> !torch.list<int>
%2644 = torch.prim.ListConstruct %int0_691, %int0_691 : (!torch.int, !torch.int) -> !torch.list<int>
%false_692 = torch.constant.bool false
%int1_693 = torch.constant.int 1
%2645 = torch.aten.convolution %2586, %2628, %2640, %2643, %2641, %2642, %false_692, %2644, %int1_693 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[256,2048,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2646 = torch.aten.relu %2645 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2647 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2648 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_694 = torch.constant.int 12
%2649 = torch.aten.item %2647 : !torch.vtensor<[],f32> -> !torch.float
%2650 = torch.aten.item %2648 : !torch.vtensor<[],si8> -> !torch.int
%2651 = torch.aten.quantize_per_tensor %110, %2649, %2650, %int12_694 : !torch.vtensor<[256,2048,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2652 = torch.aten.int_repr %2651 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],si8>
%2653 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2654 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2655 = torch.aten.item %2653 : !torch.vtensor<[],f32> -> !torch.float
%2656 = torch.aten.item %2654 : !torch.vtensor<[],si8> -> !torch.int
%2657 = torch.aten._make_per_tensor_quantized_tensor %2652, %2655, %2656 : !torch.vtensor<[256,2048,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2658 = torch.aten.dequantize.self %2657 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],f32>
%2659 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2660 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_695 = torch.constant.int 12
%2661 = torch.aten.item %2659 : !torch.vtensor<[],f32> -> !torch.float
%2662 = torch.aten.item %2660 : !torch.vtensor<[],si8> -> !torch.int
%2663 = torch.aten.quantize_per_tensor %111, %2661, %2662, %int12_695 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2664 = torch.aten.int_repr %2663 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2665 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2666 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2667 = torch.aten.item %2665 : !torch.vtensor<[],f32> -> !torch.float
%2668 = torch.aten.item %2666 : !torch.vtensor<[],si8> -> !torch.int
%2669 = torch.aten._make_per_tensor_quantized_tensor %2664, %2667, %2668 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2670 = torch.aten.dequantize.self %2669 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
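// ASPP branch 3: same shapes as branch 2, with padding = dilation = 24.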
%int24 = torch.constant.int 24
%int24_696 = torch.constant.int 24
%int24_697 = torch.constant.int 24
%int24_698 = torch.constant.int 24
%int1_699 = torch.constant.int 1
%int1_700 = torch.constant.int 1
%int0_701 = torch.constant.int 0
%2671 = torch.prim.ListConstruct %int24, %int24_696 : (!torch.int, !torch.int) -> !torch.list<int>
%2672 = torch.prim.ListConstruct %int24_697, %int24_698 : (!torch.int, !torch.int) -> !torch.list<int>
%2673 = torch.prim.ListConstruct %int1_699, %int1_700 : (!torch.int, !torch.int) -> !torch.list<int>
%2674 = torch.prim.ListConstruct %int0_701, %int0_701 : (!torch.int, !torch.int) -> !torch.list<int>
%false_702 = torch.constant.bool false
%int1_703 = torch.constant.int 1
%2675 = torch.aten.convolution %2586, %2658, %2670, %2673, %2671, %2672, %false_702, %2674, %int1_703 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[256,2048,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2676 = torch.aten.relu %2675 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2677 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2678 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_704 = torch.constant.int 12
%2679 = torch.aten.item %2677 : !torch.vtensor<[],f32> -> !torch.float
%2680 = torch.aten.item %2678 : !torch.vtensor<[],si8> -> !torch.int
%2681 = torch.aten.quantize_per_tensor %112, %2679, %2680, %int12_704 : !torch.vtensor<[256,2048,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2682 = torch.aten.int_repr %2681 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],si8>
%2683 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2684 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2685 = torch.aten.item %2683 : !torch.vtensor<[],f32> -> !torch.float
%2686 = torch.aten.item %2684 : !torch.vtensor<[],si8> -> !torch.int
%2687 = torch.aten._make_per_tensor_quantized_tensor %2682, %2685, %2686 : !torch.vtensor<[256,2048,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,2048,3,3],!torch.qint8>
%2688 = torch.aten.dequantize.self %2687 : !torch.vtensor<[256,2048,3,3],!torch.qint8> -> !torch.vtensor<[256,2048,3,3],f32>
%2689 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2690 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_705 = torch.constant.int 12
%2691 = torch.aten.item %2689 : !torch.vtensor<[],f32> -> !torch.float
%2692 = torch.aten.item %2690 : !torch.vtensor<[],si8> -> !torch.int
%2693 = torch.aten.quantize_per_tensor %113, %2691, %2692, %int12_705 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2694 = torch.aten.int_repr %2693 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2695 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2696 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2697 = torch.aten.item %2695 : !torch.vtensor<[],f32> -> !torch.float
%2698 = torch.aten.item %2696 : !torch.vtensor<[],si8> -> !torch.int
%2699 = torch.aten._make_per_tensor_quantized_tensor %2694, %2697, %2698 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2700 = torch.aten.dequantize.self %2699 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
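// ASPP branch 4: same shapes again, with padding = dilation = 36.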
%int36 = torch.constant.int 36
%int36_706 = torch.constant.int 36
%int36_707 = torch.constant.int 36
%int36_708 = torch.constant.int 36
%int1_709 = torch.constant.int 1
%int1_710 = torch.constant.int 1
%int0_711 = torch.constant.int 0
%2701 = torch.prim.ListConstruct %int36, %int36_706 : (!torch.int, !torch.int) -> !torch.list<int>
%2702 = torch.prim.ListConstruct %int36_707, %int36_708 : (!torch.int, !torch.int) -> !torch.list<int>
%2703 = torch.prim.ListConstruct %int1_709, %int1_710 : (!torch.int, !torch.int) -> !torch.list<int>
%2704 = torch.prim.ListConstruct %int0_711, %int0_711 : (!torch.int, !torch.int) -> !torch.list<int>
%false_712 = torch.constant.bool false
%int1_713 = torch.constant.int 1
%2705 = torch.aten.convolution %2586, %2688, %2700, %2703, %2701, %2702, %false_712, %2704, %int1_713 : !torch.vtensor<[1,2048,28,28],f32>, !torch.vtensor<[256,2048,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2706 = torch.aten.relu %2705 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
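// ASPP image-pooling branch: a 28x28 global average pool down to [1,2048,1,1]; the dense<28> literals
// defined next feed the resize target size computed further below. The multiply by 1.00488281
// (= 1029/1024) after the pool looks like a small scale correction folded in by the quantizing
// exporter -- that reading is an inference, not something stated in the IR.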
%2707 = torch.vtensor.literal(dense<28> : tensor<si64>) : !torch.vtensor<[],si64>
%2708 = torch.vtensor.literal(dense<28> : tensor<si64>) : !torch.vtensor<[],si64>
%int0_714 = torch.constant.int 0
%int1_715 = torch.constant.int 1
%int28 = torch.constant.int 28
%int28_716 = torch.constant.int 28
%2709 = torch.prim.ListConstruct %int28, %int28_716 : (!torch.int, !torch.int) -> !torch.list<int>
%2710 = torch.prim.ListConstruct %int0_714, %int0_714 : (!torch.int, !torch.int) -> !torch.list<int>
%2711 = torch.prim.ListConstruct %int1_715, %int1_715 : (!torch.int, !torch.int) -> !torch.list<int>
%false_717 = torch.constant.bool false
%none_718 = torch.constant.none
%2712 = torch.aten.avg_pool2d %2586, %2709, %2711, %2710, %false_717, %false_717, %none_718 : !torch.vtensor<[1,2048,28,28],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,2048,1,1],f32>
%2713 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%2714 = torch.aten.mul.Tensor %2712, %2713 : !torch.vtensor<[1,2048,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,2048,1,1],f32>
%2715 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2716 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_719 = torch.constant.int 12
%2717 = torch.aten.item %2715 : !torch.vtensor<[],f32> -> !torch.float
%2718 = torch.aten.item %2716 : !torch.vtensor<[],si8> -> !torch.int
%2719 = torch.aten.quantize_per_tensor %2714, %2717, %2718, %int12_719 : !torch.vtensor<[1,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,1,1],!torch.qint8>
%2720 = torch.aten.int_repr %2719 : !torch.vtensor<[1,2048,1,1],!torch.qint8> -> !torch.vtensor<[1,2048,1,1],si8>
%2721 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2722 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2723 = torch.aten.item %2721 : !torch.vtensor<[],f32> -> !torch.float
%2724 = torch.aten.item %2722 : !torch.vtensor<[],si8> -> !torch.int
%2725 = torch.aten._make_per_tensor_quantized_tensor %2720, %2723, %2724 : !torch.vtensor<[1,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,1,1],!torch.qint8>
%2726 = torch.aten.dequantize.self %2725 : !torch.vtensor<[1,2048,1,1],!torch.qint8> -> !torch.vtensor<[1,2048,1,1],f32>
%2727 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_720 = torch.constant.int 12
%2729 = torch.aten.item %2727 : !torch.vtensor<[],f32> -> !torch.float
%2730 = torch.aten.item %2728 : !torch.vtensor<[],si8> -> !torch.int
%2731 = torch.aten.quantize_per_tensor %114, %2729, %2730, %int12_720 : !torch.vtensor<[256,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,2048,1,1],!torch.qint8>
%2732 = torch.aten.int_repr %2731 : !torch.vtensor<[256,2048,1,1],!torch.qint8> -> !torch.vtensor<[256,2048,1,1],si8>
%2733 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2735 = torch.aten.item %2733 : !torch.vtensor<[],f32> -> !torch.float
%2736 = torch.aten.item %2734 : !torch.vtensor<[],si8> -> !torch.int
%2737 = torch.aten._make_per_tensor_quantized_tensor %2732, %2735, %2736 : !torch.vtensor<[256,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,2048,1,1],!torch.qint8>
%2738 = torch.aten.dequantize.self %2737 : !torch.vtensor<[256,2048,1,1],!torch.qint8> -> !torch.vtensor<[256,2048,1,1],f32>
%2739 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_721 = torch.constant.int 12
%2741 = torch.aten.item %2739 : !torch.vtensor<[],f32> -> !torch.float
%2742 = torch.aten.item %2740 : !torch.vtensor<[],si8> -> !torch.int
%2743 = torch.aten.quantize_per_tensor %115, %2741, %2742, %int12_721 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2744 = torch.aten.int_repr %2743 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2745 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2746 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2747 = torch.aten.item %2745 : !torch.vtensor<[],f32> -> !torch.float
%2748 = torch.aten.item %2746 : !torch.vtensor<[],si8> -> !torch.int
%2749 = torch.aten._make_per_tensor_quantized_tensor %2744, %2747, %2748 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2750 = torch.aten.dequantize.self %2749 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_722 = torch.constant.int 0
%int0_723 = torch.constant.int 0
%int1_724 = torch.constant.int 1
%int1_725 = torch.constant.int 1
%int1_726 = torch.constant.int 1
%int1_727 = torch.constant.int 1
%int0_728 = torch.constant.int 0
%2751 = torch.prim.ListConstruct %int0_722, %int0_723 : (!torch.int, !torch.int) -> !torch.list<int>
%2752 = torch.prim.ListConstruct %int1_724, %int1_725 : (!torch.int, !torch.int) -> !torch.list<int>
%2753 = torch.prim.ListConstruct %int1_726, %int1_727 : (!torch.int, !torch.int) -> !torch.list<int>
%2754 = torch.prim.ListConstruct %int0_728, %int0_728 : (!torch.int, !torch.int) -> !torch.list<int>
%false_729 = torch.constant.bool false
%int1_730 = torch.constant.int 1
%2755 = torch.aten.convolution %2726, %2738, %2750, %2753, %2751, %2752, %false_729, %2754, %int1_730 : !torch.vtensor<[1,2048,1,1],f32>, !torch.vtensor<[256,2048,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,1,1],f32>
%2756 = torch.aten.relu %2755 : !torch.vtensor<[1,256,1,1],f32> -> !torch.vtensor<[1,256,1,1],f32>
%2757 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2758 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_731 = torch.constant.int 12
%2759 = torch.aten.item %2757 : !torch.vtensor<[],f32> -> !torch.float
%2760 = torch.aten.item %2758 : !torch.vtensor<[],si8> -> !torch.int
%2761 = torch.aten.quantize_per_tensor %2756, %2759, %2760, %int12_731 : !torch.vtensor<[1,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,1,1],!torch.qint8>
%2762 = torch.aten.int_repr %2761 : !torch.vtensor<[1,256,1,1],!torch.qint8> -> !torch.vtensor<[1,256,1,1],si8>
%2763 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2764 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2765 = torch.aten.item %2763 : !torch.vtensor<[],f32> -> !torch.float
%2766 = torch.aten.item %2764 : !torch.vtensor<[],si8> -> !torch.int
%2767 = torch.aten._make_per_tensor_quantized_tensor %2762, %2765, %2766 : !torch.vtensor<[1,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,1,1],!torch.qint8>
%2768 = torch.aten.dequantize.self %2767 : !torch.vtensor<[1,256,1,1],!torch.qint8> -> !torch.vtensor<[1,256,1,1],f32>
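// The block below rebuilds the target spatial size for the pooled branch: it unsqueezes the dense<28>
// scalars (%2707, %2708) into a [2] size tensor, prepends the leading N,C dims sliced out of
// _shape_as_tensor, and feeds the resulting [4] size to onnx.Resize (mode "linear", i.e. bilinear),
// broadcasting the [1,256,1,1] pooled features back up to 28x28. The repeated select/lt/mul/add
// churn is just negative-index normalization for the unsqueeze axes.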
%2769 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_732 = torch.constant.int 0
%int0_733 = torch.constant.int 0
%int0_734 = torch.constant.int 0
%2770 = torch.aten.select.int %2769, %int0_732, %int0_734 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2771 = torch.aten.item %2770 : !torch.vtensor<[1],si64> -> !torch.int
%2772 = torch.aten.lt.int %2771, %int0_732 : !torch.int, !torch.int -> !torch.bool
%2773 = torch.aten.Int.bool %2772 : !torch.bool -> !torch.int
%2774 = torch.aten.mul.int %2773, %int0_733 : !torch.int, !torch.int -> !torch.int
%2775 = torch.aten.add.int %2771, %2774 : !torch.int, !torch.int -> !torch.int
%2776 = torch.prim.ListConstruct %2775 : (!torch.int) -> !torch.list<int>
%false_735 = torch.constant.bool false
%none_736 = torch.constant.none
%2777 = torch.aten.tensor %2776, %none_736, %none_736, %false_735 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values, %indices = torch.aten.sort %2777, %int0_732, %false_735 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_737 = torch.constant.int 0
%2778 = torch.aten.select.int %values, %int0_732, %int0_737 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2779 = torch.aten.item %2778 : !torch.vtensor<[1],si64> -> !torch.int
%2780 = torch.aten.unsqueeze %2707, %2779 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2781 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_738 = torch.constant.int 0
%int0_739 = torch.constant.int 0
%int0_740 = torch.constant.int 0
%2782 = torch.aten.select.int %2781, %int0_738, %int0_740 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2783 = torch.aten.item %2782 : !torch.vtensor<[1],si64> -> !torch.int
%2784 = torch.aten.lt.int %2783, %int0_738 : !torch.int, !torch.int -> !torch.bool
%2785 = torch.aten.Int.bool %2784 : !torch.bool -> !torch.int
%2786 = torch.aten.mul.int %2785, %int0_739 : !torch.int, !torch.int -> !torch.int
%2787 = torch.aten.add.int %2783, %2786 : !torch.int, !torch.int -> !torch.int
%2788 = torch.prim.ListConstruct %2787 : (!torch.int) -> !torch.list<int>
%false_741 = torch.constant.bool false
%none_742 = torch.constant.none
%2789 = torch.aten.tensor %2788, %none_742, %none_742, %false_741 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_743, %indices_744 = torch.aten.sort %2789, %int0_738, %false_741 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_745 = torch.constant.int 0
%2790 = torch.aten.select.int %values_743, %int0_738, %int0_745 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2791 = torch.aten.item %2790 : !torch.vtensor<[1],si64> -> !torch.int
%2792 = torch.aten.unsqueeze %2708, %2791 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2793 = torch.prim.ListConstruct %2780, %2792 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_746 = torch.constant.int 0
%2794 = torch.aten.cat %2793, %int0_746 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2795 = torch.aten._shape_as_tensor %2768 : !torch.vtensor<[1,256,1,1],f32> -> !torch.vtensor<[4],si64>
%2796 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2797 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2798 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_747 = torch.constant.none
%int1_748 = torch.constant.int 1
%2799 = torch.prim.ListConstruct %int1_748 : (!torch.int) -> !torch.list<int>
%2800 = torch.aten.ones %2799, %none_747, %none_747, %none_747, %none_747 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_749 = torch.constant.int 0
%int0_750 = torch.constant.int 0
%2801 = torch.prim.NumToTensor.Scalar %int0_750 : !torch.int -> !torch.vtensor<[1],si64>
%2802 = torch.aten.index_select %2797, %int0_749, %2801 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2803 = torch.aten.item %2802 : !torch.vtensor<[1],si64> -> !torch.int
%2804 = torch.aten.index_select %2798, %int0_749, %2801 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2805 = torch.aten.item %2804 : !torch.vtensor<[1],si64> -> !torch.int
%2806 = torch.aten.index_select %2796, %int0_749, %2801 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2807 = torch.aten.item %2806 : !torch.vtensor<[1],si64> -> !torch.int
%2808 = torch.aten.index_select %2800, %int0_749, %2801 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2809 = torch.aten.item %2808 : !torch.vtensor<[1],si64> -> !torch.int
%2810 = torch.aten.slice.Tensor %2795, %2807, %2803, %2805, %2809 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_751 = torch.constant.int 4
%none_752 = torch.constant.none
%false_753 = torch.constant.bool false
%2811 = torch.aten.to.dtype %2794, %int4_751, %false_753, %false_753, %none_752 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%2812 = torch.prim.ListConstruct %2810, %2811 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_754 = torch.constant.int 0
%2813 = torch.aten.cat %2812, %int0_754 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%2814 = torch.operator "onnx.Resize"(%2768, %none, %none, %2813) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,256,1,1],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%2815 = torch.prim.ListConstruct %2616, %2646, %2676, %2706, %2814 : (!torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[?,?,?,?],f32>) -> !torch.list<vtensor>
%int1_755 = torch.constant.int 1
%2816 = torch.aten.cat %2815, %int1_755 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,28,28],f32>
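// Concatenate the five ASPP branches along channels: four [1,256,28,28] tensors plus the resized
// pooling branch, i.e. 1280 channels (matching the [256,1280,1,1] projection weight below), though
// the result stays dynamic as [1,?,28,28] because the resized branch has unknown static dims.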
%2817 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2818 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_756 = torch.constant.int 12
%2819 = torch.aten.item %2817 : !torch.vtensor<[],f32> -> !torch.float
%2820 = torch.aten.item %2818 : !torch.vtensor<[],si8> -> !torch.int
%2821 = torch.aten.quantize_per_tensor %2816, %2819, %2820, %int12_756 : !torch.vtensor<[1,?,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,28,28],!torch.qint8>
%2822 = torch.aten.int_repr %2821 : !torch.vtensor<[1,?,28,28],!torch.qint8> -> !torch.vtensor<[1,?,28,28],si8>
%2823 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2824 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2825 = torch.aten.item %2823 : !torch.vtensor<[],f32> -> !torch.float
%2826 = torch.aten.item %2824 : !torch.vtensor<[],si8> -> !torch.int
%2827 = torch.aten._make_per_tensor_quantized_tensor %2822, %2825, %2826 : !torch.vtensor<[1,?,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,28,28],!torch.qint8>
%2828 = torch.aten.dequantize.self %2827 : !torch.vtensor<[1,?,28,28],!torch.qint8> -> !torch.vtensor<[1,?,28,28],f32>
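// ASPP projection: 1x1 conv, [1,?(=1280),28,28] x [256,1280,1,1] -> [1,256,28,28], then ReLU.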
%2829 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2830 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_757 = torch.constant.int 12
%2831 = torch.aten.item %2829 : !torch.vtensor<[],f32> -> !torch.float
%2832 = torch.aten.item %2830 : !torch.vtensor<[],si8> -> !torch.int
%2833 = torch.aten.quantize_per_tensor %116, %2831, %2832, %int12_757 : !torch.vtensor<[256,1280,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1280,1,1],!torch.qint8>
%2834 = torch.aten.int_repr %2833 : !torch.vtensor<[256,1280,1,1],!torch.qint8> -> !torch.vtensor<[256,1280,1,1],si8>
%2835 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2836 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2837 = torch.aten.item %2835 : !torch.vtensor<[],f32> -> !torch.float
%2838 = torch.aten.item %2836 : !torch.vtensor<[],si8> -> !torch.int
%2839 = torch.aten._make_per_tensor_quantized_tensor %2834, %2837, %2838 : !torch.vtensor<[256,1280,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1280,1,1],!torch.qint8>
%2840 = torch.aten.dequantize.self %2839 : !torch.vtensor<[256,1280,1,1],!torch.qint8> -> !torch.vtensor<[256,1280,1,1],f32>
%2841 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2842 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_758 = torch.constant.int 12
%2843 = torch.aten.item %2841 : !torch.vtensor<[],f32> -> !torch.float
%2844 = torch.aten.item %2842 : !torch.vtensor<[],si8> -> !torch.int
%2845 = torch.aten.quantize_per_tensor %117, %2843, %2844, %int12_758 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2846 = torch.aten.int_repr %2845 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2847 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2848 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2849 = torch.aten.item %2847 : !torch.vtensor<[],f32> -> !torch.float
%2850 = torch.aten.item %2848 : !torch.vtensor<[],si8> -> !torch.int
%2851 = torch.aten._make_per_tensor_quantized_tensor %2846, %2849, %2850 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2852 = torch.aten.dequantize.self %2851 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_759 = torch.constant.int 0
%int0_760 = torch.constant.int 0
%int1_761 = torch.constant.int 1
%int1_762 = torch.constant.int 1
%int1_763 = torch.constant.int 1
%int1_764 = torch.constant.int 1
%int0_765 = torch.constant.int 0
%2853 = torch.prim.ListConstruct %int0_759, %int0_760 : (!torch.int, !torch.int) -> !torch.list<int>
%2854 = torch.prim.ListConstruct %int1_761, %int1_762 : (!torch.int, !torch.int) -> !torch.list<int>
%2855 = torch.prim.ListConstruct %int1_763, %int1_764 : (!torch.int, !torch.int) -> !torch.list<int>
%2856 = torch.prim.ListConstruct %int0_765, %int0_765 : (!torch.int, !torch.int) -> !torch.list<int>
%false_766 = torch.constant.bool false
%int1_767 = torch.constant.int 1
%2857 = torch.aten.convolution %2828, %2840, %2852, %2855, %2853, %2854, %false_766, %2856, %int1_767 : !torch.vtensor<[1,?,28,28],f32>, !torch.vtensor<[256,1280,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2858 = torch.aten.relu %2857 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2859 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2860 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_768 = torch.constant.int 12
%2861 = torch.aten.item %2859 : !torch.vtensor<[],f32> -> !torch.float
%2862 = torch.aten.item %2860 : !torch.vtensor<[],si8> -> !torch.int
%2863 = torch.aten.quantize_per_tensor %2858, %2861, %2862, %int12_768 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2864 = torch.aten.int_repr %2863 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%2865 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2866 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2867 = torch.aten.item %2865 : !torch.vtensor<[],f32> -> !torch.float
%2868 = torch.aten.item %2866 : !torch.vtensor<[],si8> -> !torch.int
%2869 = torch.aten._make_per_tensor_quantized_tensor %2864, %2867, %2868 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2870 = torch.aten.dequantize.self %2869 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
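// 3x3 conv head: [1,256,28,28] -> [1,256,28,28], padding 1, dilation 1, then ReLU.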
%2871 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2872 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_769 = torch.constant.int 12
%2873 = torch.aten.item %2871 : !torch.vtensor<[],f32> -> !torch.float
%2874 = torch.aten.item %2872 : !torch.vtensor<[],si8> -> !torch.int
%2875 = torch.aten.quantize_per_tensor %118, %2873, %2874, %int12_769 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2876 = torch.aten.int_repr %2875 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%2877 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2878 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2879 = torch.aten.item %2877 : !torch.vtensor<[],f32> -> !torch.float
%2880 = torch.aten.item %2878 : !torch.vtensor<[],si8> -> !torch.int
%2881 = torch.aten._make_per_tensor_quantized_tensor %2876, %2879, %2880 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2882 = torch.aten.dequantize.self %2881 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%2883 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2884 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_770 = torch.constant.int 12
%2885 = torch.aten.item %2883 : !torch.vtensor<[],f32> -> !torch.float
%2886 = torch.aten.item %2884 : !torch.vtensor<[],si8> -> !torch.int
%2887 = torch.aten.quantize_per_tensor %119, %2885, %2886, %int12_770 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2888 = torch.aten.int_repr %2887 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2889 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2890 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2891 = torch.aten.item %2889 : !torch.vtensor<[],f32> -> !torch.float
%2892 = torch.aten.item %2890 : !torch.vtensor<[],si8> -> !torch.int
%2893 = torch.aten._make_per_tensor_quantized_tensor %2888, %2891, %2892 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2894 = torch.aten.dequantize.self %2893 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_771 = torch.constant.int 1
%int1_772 = torch.constant.int 1
%int1_773 = torch.constant.int 1
%int1_774 = torch.constant.int 1
%int1_775 = torch.constant.int 1
%int1_776 = torch.constant.int 1
%int0_777 = torch.constant.int 0
%2895 = torch.prim.ListConstruct %int1_771, %int1_772 : (!torch.int, !torch.int) -> !torch.list<int>
%2896 = torch.prim.ListConstruct %int1_773, %int1_774 : (!torch.int, !torch.int) -> !torch.list<int>
%2897 = torch.prim.ListConstruct %int1_775, %int1_776 : (!torch.int, !torch.int) -> !torch.list<int>
%2898 = torch.prim.ListConstruct %int0_777, %int0_777 : (!torch.int, !torch.int) -> !torch.list<int>
%false_778 = torch.constant.bool false
%int1_779 = torch.constant.int 1
%2899 = torch.aten.convolution %2870, %2882, %2894, %2897, %2895, %2896, %false_778, %2898, %int1_779 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%2900 = torch.aten.relu %2899 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%2901 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2902 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_780 = torch.constant.int 12
%2903 = torch.aten.item %2901 : !torch.vtensor<[],f32> -> !torch.float
%2904 = torch.aten.item %2902 : !torch.vtensor<[],si8> -> !torch.int
%2905 = torch.aten.quantize_per_tensor %2900, %2903, %2904, %int12_780 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2906 = torch.aten.int_repr %2905 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%2907 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2908 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2909 = torch.aten.item %2907 : !torch.vtensor<[],f32> -> !torch.float
%2910 = torch.aten.item %2908 : !torch.vtensor<[],si8> -> !torch.int
%2911 = torch.aten._make_per_tensor_quantized_tensor %2906, %2909, %2910 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%2912 = torch.aten.dequantize.self %2911 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
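// Classifier: 1x1 conv, [1,256,28,28] x [21,256,1,1] -> [1,21,28,28]; 21 output channels is
// consistent with PASCAL VOC segmentation (20 classes + background).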
%2913 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2914 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_781 = torch.constant.int 12
%2915 = torch.aten.item %2913 : !torch.vtensor<[],f32> -> !torch.float
%2916 = torch.aten.item %2914 : !torch.vtensor<[],si8> -> !torch.int
%2917 = torch.aten.quantize_per_tensor %120, %2915, %2916, %int12_781 : !torch.vtensor<[21,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21,256,1,1],!torch.qint8>
%2918 = torch.aten.int_repr %2917 : !torch.vtensor<[21,256,1,1],!torch.qint8> -> !torch.vtensor<[21,256,1,1],si8>
%2919 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2920 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2921 = torch.aten.item %2919 : !torch.vtensor<[],f32> -> !torch.float
%2922 = torch.aten.item %2920 : !torch.vtensor<[],si8> -> !torch.int
%2923 = torch.aten._make_per_tensor_quantized_tensor %2918, %2921, %2922 : !torch.vtensor<[21,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[21,256,1,1],!torch.qint8>
%2924 = torch.aten.dequantize.self %2923 : !torch.vtensor<[21,256,1,1],!torch.qint8> -> !torch.vtensor<[21,256,1,1],f32>
%2925 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2926 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_782 = torch.constant.int 12
%2927 = torch.aten.item %2925 : !torch.vtensor<[],f32> -> !torch.float
%2928 = torch.aten.item %2926 : !torch.vtensor<[],si8> -> !torch.int
%2929 = torch.aten.quantize_per_tensor %121, %2927, %2928, %int12_782 : !torch.vtensor<[21],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%2930 = torch.aten.int_repr %2929 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],si8>
%2931 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2932 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2933 = torch.aten.item %2931 : !torch.vtensor<[],f32> -> !torch.float
%2934 = torch.aten.item %2932 : !torch.vtensor<[],si8> -> !torch.int
%2935 = torch.aten._make_per_tensor_quantized_tensor %2930, %2933, %2934 : !torch.vtensor<[21],si8>, !torch.float, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%2936 = torch.aten.dequantize.self %2935 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],f32>
%int0_783 = torch.constant.int 0
%int0_784 = torch.constant.int 0
%int1_785 = torch.constant.int 1
%int1_786 = torch.constant.int 1
%int1_787 = torch.constant.int 1
%int1_788 = torch.constant.int 1
%int0_789 = torch.constant.int 0
%2937 = torch.prim.ListConstruct %int0_783, %int0_784 : (!torch.int, !torch.int) -> !torch.list<int>
%2938 = torch.prim.ListConstruct %int1_785, %int1_786 : (!torch.int, !torch.int) -> !torch.list<int>
%2939 = torch.prim.ListConstruct %int1_787, %int1_788 : (!torch.int, !torch.int) -> !torch.list<int>
%2940 = torch.prim.ListConstruct %int0_789, %int0_789 : (!torch.int, !torch.int) -> !torch.list<int>
%false_790 = torch.constant.bool false
%int1_791 = torch.constant.int 1
%2941 = torch.aten.convolution %2912, %2924, %2936, %2939, %2937, %2938, %false_790, %2940, %int1_791 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[21,256,1,1],f32>, !torch.vtensor<[21],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,21,28,28],f32>
%2942 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2943 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_792 = torch.constant.int 12
%2944 = torch.aten.item %2942 : !torch.vtensor<[],f32> -> !torch.float
%2945 = torch.aten.item %2943 : !torch.vtensor<[],si8> -> !torch.int
%2946 = torch.aten.quantize_per_tensor %2941, %2944, %2945, %int12_792 : !torch.vtensor<[1,21,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%2947 = torch.aten.int_repr %2946 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],si8>
%2948 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2949 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2950 = torch.aten.item %2948 : !torch.vtensor<[],f32> -> !torch.float
%2951 = torch.aten.item %2949 : !torch.vtensor<[],si8> -> !torch.int
%2952 = torch.aten._make_per_tensor_quantized_tensor %2947, %2950, %2951 : !torch.vtensor<[1,21,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%2953 = torch.aten.dequantize.self %2952 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],f32>
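// Final upsampling: the same unsqueeze/cat size-building pattern as before, this time around %138 and
// %139 (defined earlier in the function; presumably the 224x224 output size, given the declared
// [1,21,224,224] result type), to drive an onnx.Resize of the 28x28 logits back to input resolution.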
%2954 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_793 = torch.constant.int 0
%int0_794 = torch.constant.int 0
%int0_795 = torch.constant.int 0
%2955 = torch.aten.select.int %2954, %int0_793, %int0_795 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2956 = torch.aten.item %2955 : !torch.vtensor<[1],si64> -> !torch.int
%2957 = torch.aten.lt.int %2956, %int0_793 : !torch.int, !torch.int -> !torch.bool
%2958 = torch.aten.Int.bool %2957 : !torch.bool -> !torch.int
%2959 = torch.aten.mul.int %2958, %int0_794 : !torch.int, !torch.int -> !torch.int
%2960 = torch.aten.add.int %2956, %2959 : !torch.int, !torch.int -> !torch.int
%2961 = torch.prim.ListConstruct %2960 : (!torch.int) -> !torch.list<int>
%false_796 = torch.constant.bool false
%none_797 = torch.constant.none
%2962 = torch.aten.tensor %2961, %none_797, %none_797, %false_796 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_798, %indices_799 = torch.aten.sort %2962, %int0_793, %false_796 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_800 = torch.constant.int 0
%2963 = torch.aten.select.int %values_798, %int0_793, %int0_800 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2964 = torch.aten.item %2963 : !torch.vtensor<[1],si64> -> !torch.int
%2965 = torch.aten.unsqueeze %138, %2964 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2966 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_801 = torch.constant.int 0
%int0_802 = torch.constant.int 0
%int0_803 = torch.constant.int 0
%2967 = torch.aten.select.int %2966, %int0_801, %int0_803 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2968 = torch.aten.item %2967 : !torch.vtensor<[1],si64> -> !torch.int
%2969 = torch.aten.lt.int %2968, %int0_801 : !torch.int, !torch.int -> !torch.bool
%2970 = torch.aten.Int.bool %2969 : !torch.bool -> !torch.int
%2971 = torch.aten.mul.int %2970, %int0_802 : !torch.int, !torch.int -> !torch.int
%2972 = torch.aten.add.int %2968, %2971 : !torch.int, !torch.int -> !torch.int
%2973 = torch.prim.ListConstruct %2972 : (!torch.int) -> !torch.list<int>
%false_804 = torch.constant.bool false
%none_805 = torch.constant.none
%2974 = torch.aten.tensor %2973, %none_805, %none_805, %false_804 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_806, %indices_807 = torch.aten.sort %2974, %int0_801, %false_804 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_808 = torch.constant.int 0
%2975 = torch.aten.select.int %values_806, %int0_801, %int0_808 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2976 = torch.aten.item %2975 : !torch.vtensor<[1],si64> -> !torch.int
%2977 = torch.aten.unsqueeze %139, %2976 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2978 = torch.prim.ListConstruct %2965, %2977 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_809 = torch.constant.int 0
%2979 = torch.aten.cat %2978, %int0_809 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2980 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_810 = torch.constant.int 0
%int0_811 = torch.constant.int 0
%int0_812 = torch.constant.int 0
%2981 = torch.aten.select.int %2980, %int0_810, %int0_812 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2982 = torch.aten.item %2981 : !torch.vtensor<[1],si64> -> !torch.int
%2983 = torch.aten.lt.int %2982, %int0_810 : !torch.int, !torch.int -> !torch.bool
%2984 = torch.aten.Int.bool %2983 : !torch.bool -> !torch.int
%2985 = torch.aten.mul.int %2984, %int0_811 : !torch.int, !torch.int -> !torch.int
%2986 = torch.aten.add.int %2982, %2985 : !torch.int, !torch.int -> !torch.int
%2987 = torch.prim.ListConstruct %2986 : (!torch.int) -> !torch.list<int>
%false_813 = torch.constant.bool false
%none_814 = torch.constant.none
%2988 = torch.aten.tensor %2987, %none_814, %none_814, %false_813 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_815, %indices_816 = torch.aten.sort %2988, %int0_810, %false_813 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_817 = torch.constant.int 0
%2989 = torch.aten.select.int %values_815, %int0_810, %int0_817 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2990 = torch.aten.item %2989 : !torch.vtensor<[1],si64> -> !torch.int
%2991 = torch.aten.unsqueeze %138, %2990 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2992 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_818 = torch.constant.int 0
%int0_819 = torch.constant.int 0
%int0_820 = torch.constant.int 0
%2993 = torch.aten.select.int %2992, %int0_818, %int0_820 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2994 = torch.aten.item %2993 : !torch.vtensor<[1],si64> -> !torch.int
%2995 = torch.aten.lt.int %2994, %int0_818 : !torch.int, !torch.int -> !torch.bool
%2996 = torch.aten.Int.bool %2995 : !torch.bool -> !torch.int
%2997 = torch.aten.mul.int %2996, %int0_819 : !torch.int, !torch.int -> !torch.int
%2998 = torch.aten.add.int %2994, %2997 : !torch.int, !torch.int -> !torch.int
%2999 = torch.prim.ListConstruct %2998 : (!torch.int) -> !torch.list<int>
%false_821 = torch.constant.bool false
%none_822 = torch.constant.none
%3000 = torch.aten.tensor %2999, %none_822, %none_822, %false_821 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_823, %indices_824 = torch.aten.sort %3000, %int0_818, %false_821 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_825 = torch.constant.int 0
%3001 = torch.aten.select.int %values_823, %int0_818, %int0_825 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3002 = torch.aten.item %3001 : !torch.vtensor<[1],si64> -> !torch.int
%3003 = torch.aten.unsqueeze %139, %3002 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%3004 = torch.prim.ListConstruct %2991, %3003 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_826 = torch.constant.int 0
%3005 = torch.aten.cat %3004, %int0_826 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%3006 = torch.aten._shape_as_tensor %2953 : !torch.vtensor<[1,21,28,28],f32> -> !torch.vtensor<[4],si64>
%3007 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3008 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3009 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_827 = torch.constant.none
%int1_828 = torch.constant.int 1
%3010 = torch.prim.ListConstruct %int1_828 : (!torch.int) -> !torch.list<int>
%3011 = torch.aten.ones %3010, %none_827, %none_827, %none_827, %none_827 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_829 = torch.constant.int 0
%int0_830 = torch.constant.int 0
%3012 = torch.prim.NumToTensor.Scalar %int0_830 : !torch.int -> !torch.vtensor<[1],si64>
%3013 = torch.aten.index_select %3008, %int0_829, %3012 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3014 = torch.aten.item %3013 : !torch.vtensor<[1],si64> -> !torch.int
%3015 = torch.aten.index_select %3009, %int0_829, %3012 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3016 = torch.aten.item %3015 : !torch.vtensor<[1],si64> -> !torch.int
%3017 = torch.aten.index_select %3007, %int0_829, %3012 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3018 = torch.aten.item %3017 : !torch.vtensor<[1],si64> -> !torch.int
%3019 = torch.aten.index_select %3011, %int0_829, %3012 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3020 = torch.aten.item %3019 : !torch.vtensor<[1],si64> -> !torch.int
%3021 = torch.aten.slice.Tensor %3006, %3018, %3014, %3016, %3020 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_831 = torch.constant.int 4
%none_832 = torch.constant.none
%false_833 = torch.constant.bool false
%3022 = torch.aten.to.dtype %2979, %int4_831, %false_833, %false_833, %none_832 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%3023 = torch.prim.ListConstruct %3021, %3022 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_834 = torch.constant.int 0
%3024 = torch.aten.cat %3023, %int0_834 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%3025 = torch.operator "onnx.Resize"(%2953, %none, %none, %3024) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,21,28,28],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%3026 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3027 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_835 = torch.constant.int 12
%3028 = torch.aten.item %3026 : !torch.vtensor<[],f32> -> !torch.float
%3029 = torch.aten.item %3027 : !torch.vtensor<[],si8> -> !torch.int
%3030 = torch.aten.quantize_per_tensor %3025, %3028, %3029, %int12_835 : !torch.vtensor<[?,?,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,?,?,?],!torch.qint8>
%3031 = torch.aten.int_repr %3030 : !torch.vtensor<[?,?,?,?],!torch.qint8> -> !torch.vtensor<[?,?,?,?],si8>
%3032 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3033 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3034 = torch.aten.item %3032 : !torch.vtensor<[],f32> -> !torch.float
%3035 = torch.aten.item %3033 : !torch.vtensor<[],si8> -> !torch.int
%3036 = torch.aten._make_per_tensor_quantized_tensor %3031, %3034, %3035 : !torch.vtensor<[?,?,?,?],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,224,224],!torch.qint8>
%3037 = torch.aten.dequantize.self %3036 : !torch.vtensor<[1,21,224,224],!torch.qint8> -> !torch.vtensor<[1,21,224,224],f32>
%3038 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3039 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_836 = torch.constant.int 12
%3040 = torch.aten.item %3038 : !torch.vtensor<[],f32> -> !torch.float
%3041 = torch.aten.item %3039 : !torch.vtensor<[],si8> -> !torch.int
%3042 = torch.aten.quantize_per_tensor %122, %3040, %3041, %int12_836 : !torch.vtensor<[256,1024,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,3,3],!torch.qint8>
%3043 = torch.aten.int_repr %3042 : !torch.vtensor<[256,1024,3,3],!torch.qint8> -> !torch.vtensor<[256,1024,3,3],si8>
%3044 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3045 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3046 = torch.aten.item %3044 : !torch.vtensor<[],f32> -> !torch.float
%3047 = torch.aten.item %3045 : !torch.vtensor<[],si8> -> !torch.int
%3048 = torch.aten._make_per_tensor_quantized_tensor %3043, %3046, %3047 : !torch.vtensor<[256,1024,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,3,3],!torch.qint8>
%3049 = torch.aten.dequantize.self %3048 : !torch.vtensor<[256,1024,3,3],!torch.qint8> -> !torch.vtensor<[256,1024,3,3],f32>
%3050 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3051 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_837 = torch.constant.int 12
%3052 = torch.aten.item %3050 : !torch.vtensor<[],f32> -> !torch.float
%3053 = torch.aten.item %3051 : !torch.vtensor<[],si8> -> !torch.int
%3054 = torch.aten.quantize_per_tensor %123, %3052, %3053, %int12_837 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3055 = torch.aten.int_repr %3054 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3056 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3057 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3058 = torch.aten.item %3056 : !torch.vtensor<[],f32> -> !torch.float
%3059 = torch.aten.item %3057 : !torch.vtensor<[],si8> -> !torch.int
%3060 = torch.aten._make_per_tensor_quantized_tensor %3055, %3058, %3059 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3061 = torch.aten.dequantize.self %3060 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_838 = torch.constant.int 1
%int1_839 = torch.constant.int 1
%int1_840 = torch.constant.int 1
%int1_841 = torch.constant.int 1
%int1_842 = torch.constant.int 1
%int1_843 = torch.constant.int 1
%int0_844 = torch.constant.int 0
%3062 = torch.prim.ListConstruct %int1_838, %int1_839 : (!torch.int, !torch.int) -> !torch.list<int>
%3063 = torch.prim.ListConstruct %int1_840, %int1_841 : (!torch.int, !torch.int) -> !torch.list<int>
%3064 = torch.prim.ListConstruct %int1_842, %int1_843 : (!torch.int, !torch.int) -> !torch.list<int>
%3065 = torch.prim.ListConstruct %int0_844, %int0_844 : (!torch.int, !torch.int) -> !torch.list<int>
%false_845 = torch.constant.bool false
%int1_846 = torch.constant.int 1
%3066 = torch.aten.convolution %2128, %3049, %3061, %3064, %3062, %3063, %false_845, %3065, %int1_846 : !torch.vtensor<[1,1024,28,28],f32>, !torch.vtensor<[256,1024,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%3067 = torch.aten.relu %3066 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%3068 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_847 = torch.constant.int 12
%3070 = torch.aten.item %3068 : !torch.vtensor<[],f32> -> !torch.float
%3071 = torch.aten.item %3069 : !torch.vtensor<[],si8> -> !torch.int
%3072 = torch.aten.quantize_per_tensor %3067, %3070, %3071, %int12_847 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%3073 = torch.aten.int_repr %3072 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%3074 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3076 = torch.aten.item %3074 : !torch.vtensor<[],f32> -> !torch.float
%3077 = torch.aten.item %3075 : !torch.vtensor<[],si8> -> !torch.int
%3078 = torch.aten._make_per_tensor_quantized_tensor %3073, %3076, %3077 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%3079 = torch.aten.dequantize.self %3078 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%3080 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_848 = torch.constant.int 12
%3082 = torch.aten.item %3080 : !torch.vtensor<[],f32> -> !torch.float
%3083 = torch.aten.item %3081 : !torch.vtensor<[],si8> -> !torch.int
%3084 = torch.aten.quantize_per_tensor %124, %3082, %3083, %int12_848 : !torch.vtensor<[21,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21,256,1,1],!torch.qint8>
%3085 = torch.aten.int_repr %3084 : !torch.vtensor<[21,256,1,1],!torch.qint8> -> !torch.vtensor<[21,256,1,1],si8>
%3086 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3088 = torch.aten.item %3086 : !torch.vtensor<[],f32> -> !torch.float
%3089 = torch.aten.item %3087 : !torch.vtensor<[],si8> -> !torch.int
%3090 = torch.aten._make_per_tensor_quantized_tensor %3085, %3088, %3089 : !torch.vtensor<[21,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[21,256,1,1],!torch.qint8>
%3091 = torch.aten.dequantize.self %3090 : !torch.vtensor<[21,256,1,1],!torch.qint8> -> !torch.vtensor<[21,256,1,1],f32>
%3092 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3093 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_849 = torch.constant.int 12
%3094 = torch.aten.item %3092 : !torch.vtensor<[],f32> -> !torch.float
%3095 = torch.aten.item %3093 : !torch.vtensor<[],si8> -> !torch.int
%3096 = torch.aten.quantize_per_tensor %125, %3094, %3095, %int12_849 : !torch.vtensor<[21],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%3097 = torch.aten.int_repr %3096 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],si8>
%3098 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3099 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3100 = torch.aten.item %3098 : !torch.vtensor<[],f32> -> !torch.float
%3101 = torch.aten.item %3099 : !torch.vtensor<[],si8> -> !torch.int
%3102 = torch.aten._make_per_tensor_quantized_tensor %3097, %3100, %3101 : !torch.vtensor<[21],si8>, !torch.float, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%3103 = torch.aten.dequantize.self %3102 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],f32>
%int0_850 = torch.constant.int 0
%int0_851 = torch.constant.int 0
%int1_852 = torch.constant.int 1
%int1_853 = torch.constant.int 1
%int1_854 = torch.constant.int 1
%int1_855 = torch.constant.int 1
%int0_856 = torch.constant.int 0
%3104 = torch.prim.ListConstruct %int0_850, %int0_851 : (!torch.int, !torch.int) -> !torch.list<int>
%3105 = torch.prim.ListConstruct %int1_852, %int1_853 : (!torch.int, !torch.int) -> !torch.list<int>
%3106 = torch.prim.ListConstruct %int1_854, %int1_855 : (!torch.int, !torch.int) -> !torch.list<int>
%3107 = torch.prim.ListConstruct %int0_856, %int0_856 : (!torch.int, !torch.int) -> !torch.list<int>
%false_857 = torch.constant.bool false
%int1_858 = torch.constant.int 1
%3108 = torch.aten.convolution %3079, %3091, %3103, %3106, %3104, %3105, %false_857, %3107, %int1_858 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[21,256,1,1],f32>, !torch.vtensor<[21],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,21,28,28],f32>
%3109 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_859 = torch.constant.int 12
%3111 = torch.aten.item %3109 : !torch.vtensor<[],f32> -> !torch.float
%3112 = torch.aten.item %3110 : !torch.vtensor<[],si8> -> !torch.int
%3113 = torch.aten.quantize_per_tensor %3108, %3111, %3112, %int12_859 : !torch.vtensor<[1,21,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%3114 = torch.aten.int_repr %3113 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],si8>
%3115 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3117 = torch.aten.item %3115 : !torch.vtensor<[],f32> -> !torch.float
%3118 = torch.aten.item %3116 : !torch.vtensor<[],si8> -> !torch.int
%3119 = torch.aten._make_per_tensor_quantized_tensor %3114, %3117, %3118 : !torch.vtensor<[1,21,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%3120 = torch.aten.dequantize.self %3119 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],f32>
%3121 = torch.aten._shape_as_tensor %3120 : !torch.vtensor<[1,21,28,28],f32> -> !torch.vtensor<[4],si64>
%3122 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3123 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3124 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_860 = torch.constant.none
%int1_861 = torch.constant.int 1
%3125 = torch.prim.ListConstruct %int1_861 : (!torch.int) -> !torch.list<int>
%3126 = torch.aten.ones %3125, %none_860, %none_860, %none_860, %none_860 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_862 = torch.constant.int 0
%int0_863 = torch.constant.int 0
%3127 = torch.prim.NumToTensor.Scalar %int0_863 : !torch.int -> !torch.vtensor<[1],si64>
%3128 = torch.aten.index_select %3123, %int0_862, %3127 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3129 = torch.aten.item %3128 : !torch.vtensor<[1],si64> -> !torch.int
%3130 = torch.aten.index_select %3124, %int0_862, %3127 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3131 = torch.aten.item %3130 : !torch.vtensor<[1],si64> -> !torch.int
%3132 = torch.aten.index_select %3122, %int0_862, %3127 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3133 = torch.aten.item %3132 : !torch.vtensor<[1],si64> -> !torch.int
%3134 = torch.aten.index_select %3126, %int0_862, %3127 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3135 = torch.aten.item %3134 : !torch.vtensor<[1],si64> -> !torch.int
%3136 = torch.aten.slice.Tensor %3121, %3133, %3129, %3131, %3135 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_864 = torch.constant.int 4
%none_865 = torch.constant.none
%false_866 = torch.constant.bool false
%3137 = torch.aten.to.dtype %3005, %int4_864, %false_866, %false_866, %none_865 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%3138 = torch.prim.ListConstruct %3136, %3137 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_867 = torch.constant.int 0
%3139 = torch.aten.cat %3138, %int0_867 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%3140 = torch.operator "onnx.Resize"(%3120, %none, %none, %3139) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,21,28,28],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%3141 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3142 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_868 = torch.constant.int 12
%3143 = torch.aten.item %3141 : !torch.vtensor<[],f32> -> !torch.float
%3144 = torch.aten.item %3142 : !torch.vtensor<[],si8> -> !torch.int
%3145 = torch.aten.quantize_per_tensor %3140, %3143, %3144, %int12_868 : !torch.vtensor<[?,?,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,?,?,?],!torch.qint8>
%3146 = torch.aten.int_repr %3145 : !torch.vtensor<[?,?,?,?],!torch.qint8> -> !torch.vtensor<[?,?,?,?],si8>
%3147 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3148 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3149 = torch.aten.item %3147 : !torch.vtensor<[],f32> -> !torch.float
%3150 = torch.aten.item %3148 : !torch.vtensor<[],si8> -> !torch.int
%3151 = torch.aten._make_per_tensor_quantized_tensor %3146, %3149, %3150 : !torch.vtensor<[?,?,?,?],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,224,224],!torch.qint8>
%3152 = torch.aten.dequantize.self %3151 : !torch.vtensor<[1,21,224,224],!torch.qint8> -> !torch.vtensor<[1,21,224,224],f32>
return %3037, %3152 : !torch.vtensor<[1,21,224,224],f32>, !torch.vtensor<[1,21,224,224],f32>
}
}
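
Every tensor in the module above goes through the same fake-quantization round trip: aten.quantize_per_tensor -> aten.int_repr -> aten._make_per_tensor_quantized_tensor -> aten.dequantize.self, with %int12 selecting ScalarType 12 (torch.qint8). Since the same scale and zero point are reattached after int_repr, each round trip only snaps the float tensor onto the qint8 grid. A minimal PyTorch sketch of what one round trip computes (fake_quant_qdq is an illustrative name, not from the IR):

import torch

def fake_quant_qdq(x: torch.Tensor, scale: float, zero_point: int) -> torch.Tensor:
    q = torch.quantize_per_tensor(x, scale, zero_point, torch.qint8)      # aten.quantize_per_tensor, %int12 == qint8
    raw = q.int_repr()                                                    # aten.int_repr: strip qparams, keep the si8 payload
    q2 = torch._make_per_tensor_quantized_tensor(raw, scale, zero_point)  # reattach the same scale/zero point
    return q2.dequantize()                                                # aten.dequantize.self

y = fake_quant_qdq(torch.randn(1, 21, 28, 28), 6.25e-2, 0)  # scale/zero point taken from the literals above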
@AmosLewis (author) commented:

The three onnx.Resize ops in the module survive import as opaque torch.operator calls with dynamic [?,?,?,?] result types, rather than being converted to torch.aten ops:
%2814 = torch.operator "onnx.Resize"(%2768, %none, %none, %2813) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,256,1,1],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>

%3025 = torch.operator "onnx.Resize"(%2953, %none, %none, %3024) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,21,28,28],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>

%3140 = torch.operator "onnx.Resize"(%3120, %none, %none, %3139) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,21,28,28],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
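
Assuming the standard ONNX semantics for mode = "linear" with coordinate_transformation_mode = "half_pixel" on a 4-D input, each of these corresponds to bilinear interpolation with align_corners=False. The sizes operand (%3024, %3139) is assembled as cat(shape(x)[0:2], [out_h, out_w]); the 224x224 target is visible in the fixed [1,21,224,224] result types downstream. A hedged PyTorch sketch (resize_like_onnx is an illustrative name, not the importer's lowering):

import torch
import torch.nn.functional as F

def resize_like_onnx(x: torch.Tensor, out_h: int, out_w: int) -> torch.Tensor:
    n, c = x.shape[0], x.shape[1]    # aten._shape_as_tensor + aten.slice.Tensor [0:2]
    sizes = [n, c, out_h, out_w]     # aten.cat of the batch/channel and spatial pairs
    # onnx.Resize with mode="linear" and half_pixel coordinates on 4-D input
    # matches bilinear interpolation with align_corners=False
    return F.interpolate(x, size=(sizes[2], sizes[3]), mode="bilinear", align_corners=False)

y = resize_like_onnx(torch.randn(1, 21, 28, 28), 224, 224)  # -> [1, 21, 224, 224]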

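The unsqueeze axes feeding those sizes tensors come from a recurring negative-index idiom in the module (e.g. %2955..%2960): select a scalar d from a rank-1 tensor, then compute d + (d < 0) * n via aten.lt.int, aten.Int.bool, aten.mul.int, and aten.add.int before d reaches aten.unsqueeze or aten.slice.Tensor; n is the relevant extent (0 here, since %138 and %139 are 0-d scalars). A small sketch of the arithmetic (normalize_dim is an illustrative name):

def normalize_dim(d: int, n: int) -> int:
    # d + (d < 0) * n: wraps a negative index into [0, n)
    return d + int(d < 0) * n

assert normalize_dim(-1, 4) == 3  # a negative index wraps around
assert normalize_dim(0, 0) == 0   # the degenerate 0-d case seen in the IR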