Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save AmosLewis/0b2daadbf68b26c6f7554318a0dab847 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,3,320,320],f32>) -> (!torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>, !torch.vtensor<[1,1,320,320],f32>) attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} {
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x3x3x3xf32>) : !torch.vtensor<[64,3,3,3],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x64x3x3xf32>) : !torch.vtensor<[128,64,3,3],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x64x3x3xf32>) : !torch.vtensor<[128,64,3,3],f32>
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x128x3x3xf32>) : !torch.vtensor<[256,128,3,3],f32>
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x3x3xf32>) : !torch.vtensor<[64,256,3,3],f32>
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x128x3x3xf32>) : !torch.vtensor<[256,128,3,3],f32>
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x3x3xf32>) : !torch.vtensor<[512,256,3,3],f32>
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x3x3xf32>) : !torch.vtensor<[128,512,3,3],f32>
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x3x3xf32>) : !torch.vtensor<[512,256,3,3],f32>
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x1024x3x3xf32>) : !torch.vtensor<[512,1024,3,3],f32>
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x3x3xf32>) : !torch.vtensor<[256,1024,3,3],f32>
%137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x3x3xf32>) : !torch.vtensor<[128,512,3,3],f32>
%153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x3x3xf32>) : !torch.vtensor<[64,256,3,3],f32>
%173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%187 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%190 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%198 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x64x3x3xf32>) : !torch.vtensor<[16,64,3,3],f32>
%199 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%200 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%201 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%202 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%203 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%204 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%205 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%206 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%207 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%208 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%209 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%210 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x3x3xf32>) : !torch.vtensor<[16,16,3,3],f32>
%211 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%212 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x32x3x3xf32>) : !torch.vtensor<[16,32,3,3],f32>
%213 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%214 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x32x3x3xf32>) : !torch.vtensor<[16,32,3,3],f32>
%215 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%216 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x32x3x3xf32>) : !torch.vtensor<[16,32,3,3],f32>
%217 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%218 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x32x3x3xf32>) : !torch.vtensor<[16,32,3,3],f32>
%219 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%220 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x32x3x3xf32>) : !torch.vtensor<[16,32,3,3],f32>
%221 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%222 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x32x3x3xf32>) : !torch.vtensor<[64,32,3,3],f32>
%223 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%224 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x64x3x3xf32>) : !torch.vtensor<[1,64,3,3],f32>
%225 = torch.vtensor.literal(dense<-0.169921875> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%226 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x64x3x3xf32>) : !torch.vtensor<[1,64,3,3],f32>
%227 = torch.vtensor.literal(dense<0.4140625> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%228 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x128x3x3xf32>) : !torch.vtensor<[1,128,3,3],f32>
%229 = torch.vtensor.literal(dense<-6.093750e-01> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%230 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x256x3x3xf32>) : !torch.vtensor<[1,256,3,3],f32>
%231 = torch.vtensor.literal(dense<0.15234375> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%232 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x512x3x3xf32>) : !torch.vtensor<[1,512,3,3],f32>
%233 = torch.vtensor.literal(dense<-2.343750e-01> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%234 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x512x3x3xf32>) : !torch.vtensor<[1,512,3,3],f32>
%235 = torch.vtensor.literal(dense<-0.4765625> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%236 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x6x1x1xf32>) : !torch.vtensor<[1,6,1,1],f32>
%237 = torch.vtensor.literal(dense<0.00122070313> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%none = torch.constant.none
%238 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%239 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12 = torch.constant.int 12
%240 = torch.aten.item %238 : !torch.vtensor<[],f32> -> !torch.float
%241 = torch.aten.item %239 : !torch.vtensor<[],si8> -> !torch.int
%242 = torch.aten.quantize_per_tensor %arg0, %240, %241, %int12 : !torch.vtensor<[1,3,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,3,320,320],!torch.qint8>
%243 = torch.aten.int_repr %242 : !torch.vtensor<[1,3,320,320],!torch.qint8> -> !torch.vtensor<[1,3,320,320],si8>
%244 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%245 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%246 = torch.aten.item %244 : !torch.vtensor<[],f32> -> !torch.float
%247 = torch.aten.item %245 : !torch.vtensor<[],si8> -> !torch.int
%248 = torch.aten._make_per_tensor_quantized_tensor %243, %246, %247 : !torch.vtensor<[1,3,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,3,320,320],!torch.qint8>
%249 = torch.aten.dequantize.self %248 : !torch.vtensor<[1,3,320,320],!torch.qint8> -> !torch.vtensor<[1,3,320,320],f32>
%250 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%251 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_0 = torch.constant.int 12
%252 = torch.aten.item %250 : !torch.vtensor<[],f32> -> !torch.float
%253 = torch.aten.item %251 : !torch.vtensor<[],si8> -> !torch.int
%254 = torch.aten.quantize_per_tensor %0, %252, %253, %int12_0 : !torch.vtensor<[64,3,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,3,3,3],!torch.qint8>
%255 = torch.aten.int_repr %254 : !torch.vtensor<[64,3,3,3],!torch.qint8> -> !torch.vtensor<[64,3,3,3],si8>
%256 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%257 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%258 = torch.aten.item %256 : !torch.vtensor<[],f32> -> !torch.float
%259 = torch.aten.item %257 : !torch.vtensor<[],si8> -> !torch.int
%260 = torch.aten._make_per_tensor_quantized_tensor %255, %258, %259 : !torch.vtensor<[64,3,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,3,3,3],!torch.qint8>
%261 = torch.aten.dequantize.self %260 : !torch.vtensor<[64,3,3,3],!torch.qint8> -> !torch.vtensor<[64,3,3,3],f32>
%262 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%263 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1 = torch.constant.int 12
%264 = torch.aten.item %262 : !torch.vtensor<[],f32> -> !torch.float
%265 = torch.aten.item %263 : !torch.vtensor<[],si8> -> !torch.int
%266 = torch.aten.quantize_per_tensor %1, %264, %265, %int12_1 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%267 = torch.aten.int_repr %266 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%268 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%269 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%270 = torch.aten.item %268 : !torch.vtensor<[],f32> -> !torch.float
%271 = torch.aten.item %269 : !torch.vtensor<[],si8> -> !torch.int
%272 = torch.aten._make_per_tensor_quantized_tensor %267, %270, %271 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%273 = torch.aten.dequantize.self %272 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
// Conv layer #1: 3x3 convolution (input %249 [1,3,320,320] -> [1,64,320,320];
// stride/padding/dilation all [1,1], not transposed, groups=1) + ReLU.
%int1 = torch.constant.int 1
%int1_2 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int1_4 = torch.constant.int 1
%int1_5 = torch.constant.int 1
%int1_6 = torch.constant.int 1
%int0 = torch.constant.int 0
%274 = torch.prim.ListConstruct %int1, %int1_2 : (!torch.int, !torch.int) -> !torch.list<int>
%275 = torch.prim.ListConstruct %int1_3, %int1_4 : (!torch.int, !torch.int) -> !torch.list<int>
%276 = torch.prim.ListConstruct %int1_5, %int1_6 : (!torch.int, !torch.int) -> !torch.list<int>
%277 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_7 = torch.constant.int 1
%278 = torch.aten.convolution %249, %261, %273, %276, %274, %275, %false, %277, %int1_7 : !torch.vtensor<[1,3,320,320],f32>, !torch.vtensor<[64,3,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,320,320],f32>
%279 = torch.aten.relu %278 : !torch.vtensor<[1,64,320,320],f32> -> !torch.vtensor<[1,64,320,320],f32>
// Fake-quantize the activation: quantize to qint8 (dtype code 12) at
// scale 0.0625, zero-point 0, drop to the raw si8 representation, rebuild
// the qint8 tensor with the same scale/zp, and dequantize back to f32 (%291).
%280 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%281 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_8 = torch.constant.int 12
%282 = torch.aten.item %280 : !torch.vtensor<[],f32> -> !torch.float
%283 = torch.aten.item %281 : !torch.vtensor<[],si8> -> !torch.int
%284 = torch.aten.quantize_per_tensor %279, %282, %283, %int12_8 : !torch.vtensor<[1,64,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%285 = torch.aten.int_repr %284 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],si8>
%286 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%287 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%288 = torch.aten.item %286 : !torch.vtensor<[],f32> -> !torch.float
%289 = torch.aten.item %287 : !torch.vtensor<[],si8> -> !torch.int
%290 = torch.aten._make_per_tensor_quantized_tensor %285, %288, %289 : !torch.vtensor<[1,64,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%291 = torch.aten.dequantize.self %290 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],f32>
// Fake-quantize the next conv's parameters: weight %2 ([32,64,3,3]) at
// scale 0.015625 (1/64), zp 0 -> %303; bias %3 ([32]) at scale 0.0625,
// zp 0 -> %315. Same quantize -> int_repr -> rebuild -> dequantize pattern.
%292 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_9 = torch.constant.int 12
%294 = torch.aten.item %292 : !torch.vtensor<[],f32> -> !torch.float
%295 = torch.aten.item %293 : !torch.vtensor<[],si8> -> !torch.int
%296 = torch.aten.quantize_per_tensor %2, %294, %295, %int12_9 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%297 = torch.aten.int_repr %296 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%298 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%299 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%300 = torch.aten.item %298 : !torch.vtensor<[],f32> -> !torch.float
%301 = torch.aten.item %299 : !torch.vtensor<[],si8> -> !torch.int
%302 = torch.aten._make_per_tensor_quantized_tensor %297, %300, %301 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%303 = torch.aten.dequantize.self %302 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%304 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%305 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_10 = torch.constant.int 12
%306 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%307 = torch.aten.item %305 : !torch.vtensor<[],si8> -> !torch.int
%308 = torch.aten.quantize_per_tensor %3, %306, %307, %int12_10 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%309 = torch.aten.int_repr %308 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%310 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%311 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%312 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float
%313 = torch.aten.item %311 : !torch.vtensor<[],si8> -> !torch.int
%314 = torch.aten._make_per_tensor_quantized_tensor %309, %312, %313 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%315 = torch.aten.dequantize.self %314 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
// Conv layer #2: 3x3, 64 -> 32 channels at 320x320 (stride/padding/dilation
// all [1,1], groups=1) + ReLU, then a qint8 fake-quant round-trip of the
// activation at scale 0.25, zero-point 0 -> %333.
%int1_11 = torch.constant.int 1
%int1_12 = torch.constant.int 1
%int1_13 = torch.constant.int 1
%int1_14 = torch.constant.int 1
%int1_15 = torch.constant.int 1
%int1_16 = torch.constant.int 1
%int0_17 = torch.constant.int 0
%316 = torch.prim.ListConstruct %int1_11, %int1_12 : (!torch.int, !torch.int) -> !torch.list<int>
%317 = torch.prim.ListConstruct %int1_13, %int1_14 : (!torch.int, !torch.int) -> !torch.list<int>
%318 = torch.prim.ListConstruct %int1_15, %int1_16 : (!torch.int, !torch.int) -> !torch.list<int>
%319 = torch.prim.ListConstruct %int0_17, %int0_17 : (!torch.int, !torch.int) -> !torch.list<int>
%false_18 = torch.constant.bool false
%int1_19 = torch.constant.int 1
%320 = torch.aten.convolution %291, %303, %315, %318, %316, %317, %false_18, %319, %int1_19 : !torch.vtensor<[1,64,320,320],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,320,320],f32>
%321 = torch.aten.relu %320 : !torch.vtensor<[1,32,320,320],f32> -> !torch.vtensor<[1,32,320,320],f32>
%322 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_20 = torch.constant.int 12
%324 = torch.aten.item %322 : !torch.vtensor<[],f32> -> !torch.float
%325 = torch.aten.item %323 : !torch.vtensor<[],si8> -> !torch.int
%326 = torch.aten.quantize_per_tensor %321, %324, %325, %int12_20 : !torch.vtensor<[1,32,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,320,320],!torch.qint8>
%327 = torch.aten.int_repr %326 : !torch.vtensor<[1,32,320,320],!torch.qint8> -> !torch.vtensor<[1,32,320,320],si8>
%328 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%329 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%330 = torch.aten.item %328 : !torch.vtensor<[],f32> -> !torch.float
%331 = torch.aten.item %329 : !torch.vtensor<[],si8> -> !torch.int
%332 = torch.aten._make_per_tensor_quantized_tensor %327, %330, %331 : !torch.vtensor<[1,32,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,320,320],!torch.qint8>
%333 = torch.aten.dequantize.self %332 : !torch.vtensor<[1,32,320,320],!torch.qint8> -> !torch.vtensor<[1,32,320,320],f32>
// Max-pool: kernel [2,2], stride [2,2], padding [0,0], dilation [1,1],
// ceil_mode=true; 320x320 -> 160x160. Followed by the same qint8 fake-quant
// round-trip at scale 0.25, zp 0 -> %350.
%int2 = torch.constant.int 2
%int2_21 = torch.constant.int 2
%334 = torch.prim.ListConstruct %int2, %int2_21 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_22 = torch.constant.int 0
%int0_23 = torch.constant.int 0
%335 = torch.prim.ListConstruct %int0_22, %int0_23 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_24 = torch.constant.int 2
%int2_25 = torch.constant.int 2
%336 = torch.prim.ListConstruct %int2_24, %int2_25 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_26 = torch.constant.int 1
%int1_27 = torch.constant.int 1
%337 = torch.prim.ListConstruct %int1_26, %int1_27 : (!torch.int, !torch.int) -> !torch.list<int>
%true = torch.constant.bool true
%338 = torch.aten.max_pool2d %333, %334, %336, %335, %337, %true : !torch.vtensor<[1,32,320,320],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,160,160],f32>
%339 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_28 = torch.constant.int 12
%341 = torch.aten.item %339 : !torch.vtensor<[],f32> -> !torch.float
%342 = torch.aten.item %340 : !torch.vtensor<[],si8> -> !torch.int
%343 = torch.aten.quantize_per_tensor %338, %341, %342, %int12_28 : !torch.vtensor<[1,32,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%344 = torch.aten.int_repr %343 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],si8>
%345 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%347 = torch.aten.item %345 : !torch.vtensor<[],f32> -> !torch.float
%348 = torch.aten.item %346 : !torch.vtensor<[],si8> -> !torch.int
%349 = torch.aten._make_per_tensor_quantized_tensor %344, %347, %348 : !torch.vtensor<[1,32,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%350 = torch.aten.dequantize.self %349 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],f32>
// Fake-quantize parameters for the next conv: weight %4 ([32,32,3,3]) at
// scale 0.001953125 (1/512), zp 0 -> %362; bias %5 ([32]) at scale 0.0625,
// zp 0 -> %374.
%351 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%352 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_29 = torch.constant.int 12
%353 = torch.aten.item %351 : !torch.vtensor<[],f32> -> !torch.float
%354 = torch.aten.item %352 : !torch.vtensor<[],si8> -> !torch.int
%355 = torch.aten.quantize_per_tensor %4, %353, %354, %int12_29 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%356 = torch.aten.int_repr %355 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%357 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%358 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%359 = torch.aten.item %357 : !torch.vtensor<[],f32> -> !torch.float
%360 = torch.aten.item %358 : !torch.vtensor<[],si8> -> !torch.int
%361 = torch.aten._make_per_tensor_quantized_tensor %356, %359, %360 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%362 = torch.aten.dequantize.self %361 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%363 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%364 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_30 = torch.constant.int 12
%365 = torch.aten.item %363 : !torch.vtensor<[],f32> -> !torch.float
%366 = torch.aten.item %364 : !torch.vtensor<[],si8> -> !torch.int
%367 = torch.aten.quantize_per_tensor %5, %365, %366, %int12_30 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%368 = torch.aten.int_repr %367 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%369 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%370 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%371 = torch.aten.item %369 : !torch.vtensor<[],f32> -> !torch.float
%372 = torch.aten.item %370 : !torch.vtensor<[],si8> -> !torch.int
%373 = torch.aten._make_per_tensor_quantized_tensor %368, %371, %372 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%374 = torch.aten.dequantize.self %373 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
// Conv layer #3: 3x3, 32 -> 32 channels at 160x160 (stride/padding/dilation
// all [1,1], groups=1) + ReLU, then a qint8 fake-quant round-trip at
// scale 0.125, zp 0 -> %392.
%int1_31 = torch.constant.int 1
%int1_32 = torch.constant.int 1
%int1_33 = torch.constant.int 1
%int1_34 = torch.constant.int 1
%int1_35 = torch.constant.int 1
%int1_36 = torch.constant.int 1
%int0_37 = torch.constant.int 0
%375 = torch.prim.ListConstruct %int1_31, %int1_32 : (!torch.int, !torch.int) -> !torch.list<int>
%376 = torch.prim.ListConstruct %int1_33, %int1_34 : (!torch.int, !torch.int) -> !torch.list<int>
%377 = torch.prim.ListConstruct %int1_35, %int1_36 : (!torch.int, !torch.int) -> !torch.list<int>
%378 = torch.prim.ListConstruct %int0_37, %int0_37 : (!torch.int, !torch.int) -> !torch.list<int>
%false_38 = torch.constant.bool false
%int1_39 = torch.constant.int 1
%379 = torch.aten.convolution %350, %362, %374, %377, %375, %376, %false_38, %378, %int1_39 : !torch.vtensor<[1,32,160,160],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,160,160],f32>
%380 = torch.aten.relu %379 : !torch.vtensor<[1,32,160,160],f32> -> !torch.vtensor<[1,32,160,160],f32>
%381 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%382 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_40 = torch.constant.int 12
%383 = torch.aten.item %381 : !torch.vtensor<[],f32> -> !torch.float
%384 = torch.aten.item %382 : !torch.vtensor<[],si8> -> !torch.int
%385 = torch.aten.quantize_per_tensor %380, %383, %384, %int12_40 : !torch.vtensor<[1,32,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%386 = torch.aten.int_repr %385 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],si8>
%387 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%388 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%389 = torch.aten.item %387 : !torch.vtensor<[],f32> -> !torch.float
%390 = torch.aten.item %388 : !torch.vtensor<[],si8> -> !torch.int
%391 = torch.aten._make_per_tensor_quantized_tensor %386, %389, %390 : !torch.vtensor<[1,32,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%392 = torch.aten.dequantize.self %391 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],f32>
// Max-pool: kernel [2,2], stride [2,2], padding [0,0], dilation [1,1],
// ceil_mode=true; 160x160 -> 80x80, then qint8 fake-quant round-trip at
// scale 0.125, zp 0 -> %409.
%int2_41 = torch.constant.int 2
%int2_42 = torch.constant.int 2
%393 = torch.prim.ListConstruct %int2_41, %int2_42 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_43 = torch.constant.int 0
%int0_44 = torch.constant.int 0
%394 = torch.prim.ListConstruct %int0_43, %int0_44 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_45 = torch.constant.int 2
%int2_46 = torch.constant.int 2
%395 = torch.prim.ListConstruct %int2_45, %int2_46 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_47 = torch.constant.int 1
%int1_48 = torch.constant.int 1
%396 = torch.prim.ListConstruct %int1_47, %int1_48 : (!torch.int, !torch.int) -> !torch.list<int>
%true_49 = torch.constant.bool true
%397 = torch.aten.max_pool2d %392, %393, %395, %394, %396, %true_49 : !torch.vtensor<[1,32,160,160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,80,80],f32>
%398 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%399 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_50 = torch.constant.int 12
%400 = torch.aten.item %398 : !torch.vtensor<[],f32> -> !torch.float
%401 = torch.aten.item %399 : !torch.vtensor<[],si8> -> !torch.int
%402 = torch.aten.quantize_per_tensor %397, %400, %401, %int12_50 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%403 = torch.aten.int_repr %402 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%404 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%405 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%406 = torch.aten.item %404 : !torch.vtensor<[],f32> -> !torch.float
%407 = torch.aten.item %405 : !torch.vtensor<[],si8> -> !torch.int
%408 = torch.aten._make_per_tensor_quantized_tensor %403, %406, %407 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%409 = torch.aten.dequantize.self %408 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
// Fake-quantize parameters for the next conv: weight %6 ([32,32,3,3]) at
// scale 0.001953125, zp 0 -> %421; bias %7 ([32]) at scale 0.03125,
// zp 0 -> %433.
%410 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_51 = torch.constant.int 12
%412 = torch.aten.item %410 : !torch.vtensor<[],f32> -> !torch.float
%413 = torch.aten.item %411 : !torch.vtensor<[],si8> -> !torch.int
%414 = torch.aten.quantize_per_tensor %6, %412, %413, %int12_51 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%415 = torch.aten.int_repr %414 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%416 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%418 = torch.aten.item %416 : !torch.vtensor<[],f32> -> !torch.float
%419 = torch.aten.item %417 : !torch.vtensor<[],si8> -> !torch.int
%420 = torch.aten._make_per_tensor_quantized_tensor %415, %418, %419 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%421 = torch.aten.dequantize.self %420 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%422 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_52 = torch.constant.int 12
%424 = torch.aten.item %422 : !torch.vtensor<[],f32> -> !torch.float
%425 = torch.aten.item %423 : !torch.vtensor<[],si8> -> !torch.int
%426 = torch.aten.quantize_per_tensor %7, %424, %425, %int12_52 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%427 = torch.aten.int_repr %426 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%428 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%430 = torch.aten.item %428 : !torch.vtensor<[],f32> -> !torch.float
%431 = torch.aten.item %429 : !torch.vtensor<[],si8> -> !torch.int
%432 = torch.aten._make_per_tensor_quantized_tensor %427, %430, %431 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%433 = torch.aten.dequantize.self %432 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
// Conv layer #4: 3x3, 32 -> 32 channels at 80x80 (stride/padding/dilation
// all [1,1], groups=1) + ReLU, then qint8 fake-quant round-trip at
// scale 0.0625, zp 0 -> %451.
%int1_53 = torch.constant.int 1
%int1_54 = torch.constant.int 1
%int1_55 = torch.constant.int 1
%int1_56 = torch.constant.int 1
%int1_57 = torch.constant.int 1
%int1_58 = torch.constant.int 1
%int0_59 = torch.constant.int 0
%434 = torch.prim.ListConstruct %int1_53, %int1_54 : (!torch.int, !torch.int) -> !torch.list<int>
%435 = torch.prim.ListConstruct %int1_55, %int1_56 : (!torch.int, !torch.int) -> !torch.list<int>
%436 = torch.prim.ListConstruct %int1_57, %int1_58 : (!torch.int, !torch.int) -> !torch.list<int>
%437 = torch.prim.ListConstruct %int0_59, %int0_59 : (!torch.int, !torch.int) -> !torch.list<int>
%false_60 = torch.constant.bool false
%int1_61 = torch.constant.int 1
%438 = torch.aten.convolution %409, %421, %433, %436, %434, %435, %false_60, %437, %int1_61 : !torch.vtensor<[1,32,80,80],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%439 = torch.aten.relu %438 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%440 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_62 = torch.constant.int 12
%442 = torch.aten.item %440 : !torch.vtensor<[],f32> -> !torch.float
%443 = torch.aten.item %441 : !torch.vtensor<[],si8> -> !torch.int
%444 = torch.aten.quantize_per_tensor %439, %442, %443, %int12_62 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%445 = torch.aten.int_repr %444 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%446 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%448 = torch.aten.item %446 : !torch.vtensor<[],f32> -> !torch.float
%449 = torch.aten.item %447 : !torch.vtensor<[],si8> -> !torch.int
%450 = torch.aten._make_per_tensor_quantized_tensor %445, %448, %449 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%451 = torch.aten.dequantize.self %450 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
// Max-pool: kernel [2,2], stride [2,2], padding [0,0], dilation [1,1],
// ceil_mode=true; 80x80 -> 40x40, then qint8 fake-quant round-trip at
// scale 0.0625, zp 0 -> %468.
%int2_63 = torch.constant.int 2
%int2_64 = torch.constant.int 2
%452 = torch.prim.ListConstruct %int2_63, %int2_64 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_65 = torch.constant.int 0
%int0_66 = torch.constant.int 0
%453 = torch.prim.ListConstruct %int0_65, %int0_66 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_67 = torch.constant.int 2
%int2_68 = torch.constant.int 2
%454 = torch.prim.ListConstruct %int2_67, %int2_68 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_69 = torch.constant.int 1
%int1_70 = torch.constant.int 1
%455 = torch.prim.ListConstruct %int1_69, %int1_70 : (!torch.int, !torch.int) -> !torch.list<int>
%true_71 = torch.constant.bool true
%456 = torch.aten.max_pool2d %451, %452, %454, %453, %455, %true_71 : !torch.vtensor<[1,32,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,40,40],f32>
%457 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%458 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_72 = torch.constant.int 12
%459 = torch.aten.item %457 : !torch.vtensor<[],f32> -> !torch.float
%460 = torch.aten.item %458 : !torch.vtensor<[],si8> -> !torch.int
%461 = torch.aten.quantize_per_tensor %456, %459, %460, %int12_72 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%462 = torch.aten.int_repr %461 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%463 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%464 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%465 = torch.aten.item %463 : !torch.vtensor<[],f32> -> !torch.float
%466 = torch.aten.item %464 : !torch.vtensor<[],si8> -> !torch.int
%467 = torch.aten._make_per_tensor_quantized_tensor %462, %465, %466 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%468 = torch.aten.dequantize.self %467 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
// Fake-quantize parameters for the next conv: weight %8 ([32,32,3,3]) at
// scale 0.001953125, zp 0 -> %480; bias %9 ([32]) at scale 0.03125,
// zp 0 -> %492.
%469 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%470 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_73 = torch.constant.int 12
%471 = torch.aten.item %469 : !torch.vtensor<[],f32> -> !torch.float
%472 = torch.aten.item %470 : !torch.vtensor<[],si8> -> !torch.int
%473 = torch.aten.quantize_per_tensor %8, %471, %472, %int12_73 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%474 = torch.aten.int_repr %473 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%475 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%476 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%477 = torch.aten.item %475 : !torch.vtensor<[],f32> -> !torch.float
%478 = torch.aten.item %476 : !torch.vtensor<[],si8> -> !torch.int
%479 = torch.aten._make_per_tensor_quantized_tensor %474, %477, %478 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%480 = torch.aten.dequantize.self %479 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%481 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%482 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_74 = torch.constant.int 12
%483 = torch.aten.item %481 : !torch.vtensor<[],f32> -> !torch.float
%484 = torch.aten.item %482 : !torch.vtensor<[],si8> -> !torch.int
%485 = torch.aten.quantize_per_tensor %9, %483, %484, %int12_74 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%486 = torch.aten.int_repr %485 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%487 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%488 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%489 = torch.aten.item %487 : !torch.vtensor<[],f32> -> !torch.float
%490 = torch.aten.item %488 : !torch.vtensor<[],si8> -> !torch.int
%491 = torch.aten._make_per_tensor_quantized_tensor %486, %489, %490 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%492 = torch.aten.dequantize.self %491 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
// Conv layer #5: 3x3, 32 -> 32 channels at 40x40 (stride/padding/dilation
// all [1,1], groups=1) + ReLU, then qint8 fake-quant round-trip at
// scale 0.0625, zp 0 -> %510.
%int1_75 = torch.constant.int 1
%int1_76 = torch.constant.int 1
%int1_77 = torch.constant.int 1
%int1_78 = torch.constant.int 1
%int1_79 = torch.constant.int 1
%int1_80 = torch.constant.int 1
%int0_81 = torch.constant.int 0
%493 = torch.prim.ListConstruct %int1_75, %int1_76 : (!torch.int, !torch.int) -> !torch.list<int>
%494 = torch.prim.ListConstruct %int1_77, %int1_78 : (!torch.int, !torch.int) -> !torch.list<int>
%495 = torch.prim.ListConstruct %int1_79, %int1_80 : (!torch.int, !torch.int) -> !torch.list<int>
%496 = torch.prim.ListConstruct %int0_81, %int0_81 : (!torch.int, !torch.int) -> !torch.list<int>
%false_82 = torch.constant.bool false
%int1_83 = torch.constant.int 1
%497 = torch.aten.convolution %468, %480, %492, %495, %493, %494, %false_82, %496, %int1_83 : !torch.vtensor<[1,32,40,40],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%498 = torch.aten.relu %497 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%499 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%500 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_84 = torch.constant.int 12
%501 = torch.aten.item %499 : !torch.vtensor<[],f32> -> !torch.float
%502 = torch.aten.item %500 : !torch.vtensor<[],si8> -> !torch.int
%503 = torch.aten.quantize_per_tensor %498, %501, %502, %int12_84 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%504 = torch.aten.int_repr %503 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%505 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%506 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%507 = torch.aten.item %505 : !torch.vtensor<[],f32> -> !torch.float
%508 = torch.aten.item %506 : !torch.vtensor<[],si8> -> !torch.int
%509 = torch.aten._make_per_tensor_quantized_tensor %504, %507, %508 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%510 = torch.aten.dequantize.self %509 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
// Max-pool: kernel [2,2], stride [2,2], padding [0,0], dilation [1,1],
// ceil_mode=true; 40x40 -> 20x20, then qint8 fake-quant round-trip at
// scale 0.0625, zp 0 -> %527.
%int2_85 = torch.constant.int 2
%int2_86 = torch.constant.int 2
%511 = torch.prim.ListConstruct %int2_85, %int2_86 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_87 = torch.constant.int 0
%int0_88 = torch.constant.int 0
%512 = torch.prim.ListConstruct %int0_87, %int0_88 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_89 = torch.constant.int 2
%int2_90 = torch.constant.int 2
%513 = torch.prim.ListConstruct %int2_89, %int2_90 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_91 = torch.constant.int 1
%int1_92 = torch.constant.int 1
%514 = torch.prim.ListConstruct %int1_91, %int1_92 : (!torch.int, !torch.int) -> !torch.list<int>
%true_93 = torch.constant.bool true
%515 = torch.aten.max_pool2d %510, %511, %513, %512, %514, %true_93 : !torch.vtensor<[1,32,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,20,20],f32>
%516 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%517 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_94 = torch.constant.int 12
%518 = torch.aten.item %516 : !torch.vtensor<[],f32> -> !torch.float
%519 = torch.aten.item %517 : !torch.vtensor<[],si8> -> !torch.int
%520 = torch.aten.quantize_per_tensor %515, %518, %519, %int12_94 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%521 = torch.aten.int_repr %520 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%522 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%523 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%524 = torch.aten.item %522 : !torch.vtensor<[],f32> -> !torch.float
%525 = torch.aten.item %523 : !torch.vtensor<[],si8> -> !torch.int
%526 = torch.aten._make_per_tensor_quantized_tensor %521, %524, %525 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%527 = torch.aten.dequantize.self %526 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
// Fake-quantize parameters for the next conv: weight %10 ([32,32,3,3]) at
// scale 0.001953125, zp 0 -> %539; bias %11 at scale 0.03125, zp 0 -> %551.
%528 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_95 = torch.constant.int 12
%530 = torch.aten.item %528 : !torch.vtensor<[],f32> -> !torch.float
%531 = torch.aten.item %529 : !torch.vtensor<[],si8> -> !torch.int
%532 = torch.aten.quantize_per_tensor %10, %530, %531, %int12_95 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%533 = torch.aten.int_repr %532 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%534 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%535 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%536 = torch.aten.item %534 : !torch.vtensor<[],f32> -> !torch.float
%537 = torch.aten.item %535 : !torch.vtensor<[],si8> -> !torch.int
%538 = torch.aten._make_per_tensor_quantized_tensor %533, %536, %537 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%539 = torch.aten.dequantize.self %538 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%540 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%541 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_96 = torch.constant.int 12
%542 = torch.aten.item %540 : !torch.vtensor<[],f32> -> !torch.float
%543 = torch.aten.item %541 : !torch.vtensor<[],si8> -> !torch.int
%544 = torch.aten.quantize_per_tensor %11, %542, %543, %int12_96 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%545 = torch.aten.int_repr %544 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%546 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%547 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%548 = torch.aten.item %546 : !torch.vtensor<[],f32> -> !torch.float
%549 = torch.aten.item %547 : !torch.vtensor<[],si8> -> !torch.int
%550 = torch.aten._make_per_tensor_quantized_tensor %545, %548, %549 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%551 = torch.aten.dequantize.self %550 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
// Conv layer #6: 3x3, 32 -> 32 channels at 20x20 (stride/padding/dilation
// all [1,1], groups=1) + ReLU, then qint8 fake-quant round-trip at
// scale 0.03125, zp 0 -> %569.
%int1_97 = torch.constant.int 1
%int1_98 = torch.constant.int 1
%int1_99 = torch.constant.int 1
%int1_100 = torch.constant.int 1
%int1_101 = torch.constant.int 1
%int1_102 = torch.constant.int 1
%int0_103 = torch.constant.int 0
%552 = torch.prim.ListConstruct %int1_97, %int1_98 : (!torch.int, !torch.int) -> !torch.list<int>
%553 = torch.prim.ListConstruct %int1_99, %int1_100 : (!torch.int, !torch.int) -> !torch.list<int>
%554 = torch.prim.ListConstruct %int1_101, %int1_102 : (!torch.int, !torch.int) -> !torch.list<int>
%555 = torch.prim.ListConstruct %int0_103, %int0_103 : (!torch.int, !torch.int) -> !torch.list<int>
%false_104 = torch.constant.bool false
%int1_105 = torch.constant.int 1
%556 = torch.aten.convolution %527, %539, %551, %554, %552, %553, %false_104, %555, %int1_105 : !torch.vtensor<[1,32,20,20],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%557 = torch.aten.relu %556 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%558 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%559 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_106 = torch.constant.int 12
%560 = torch.aten.item %558 : !torch.vtensor<[],f32> -> !torch.float
%561 = torch.aten.item %559 : !torch.vtensor<[],si8> -> !torch.int
%562 = torch.aten.quantize_per_tensor %557, %560, %561, %int12_106 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%563 = torch.aten.int_repr %562 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%564 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%565 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%566 = torch.aten.item %564 : !torch.vtensor<[],f32> -> !torch.float
%567 = torch.aten.item %565 : !torch.vtensor<[],si8> -> !torch.int
%568 = torch.aten._make_per_tensor_quantized_tensor %563, %566, %567 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%569 = torch.aten.dequantize.self %568 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%int2_107 = torch.constant.int 2
%int2_108 = torch.constant.int 2
%570 = torch.prim.ListConstruct %int2_107, %int2_108 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_109 = torch.constant.int 0
%int0_110 = torch.constant.int 0
%571 = torch.prim.ListConstruct %int0_109, %int0_110 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_111 = torch.constant.int 2
%int2_112 = torch.constant.int 2
%572 = torch.prim.ListConstruct %int2_111, %int2_112 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_113 = torch.constant.int 1
%int1_114 = torch.constant.int 1
%573 = torch.prim.ListConstruct %int1_113, %int1_114 : (!torch.int, !torch.int) -> !torch.list<int>
%true_115 = torch.constant.bool true
%574 = torch.aten.max_pool2d %569, %570, %572, %571, %573, %true_115 : !torch.vtensor<[1,32,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,10,10],f32>
%575 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%576 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_116 = torch.constant.int 12
%577 = torch.aten.item %575 : !torch.vtensor<[],f32> -> !torch.float
%578 = torch.aten.item %576 : !torch.vtensor<[],si8> -> !torch.int
%579 = torch.aten.quantize_per_tensor %574, %577, %578, %int12_116 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%580 = torch.aten.int_repr %579 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%581 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%582 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%583 = torch.aten.item %581 : !torch.vtensor<[],f32> -> !torch.float
%584 = torch.aten.item %582 : !torch.vtensor<[],si8> -> !torch.int
%585 = torch.aten._make_per_tensor_quantized_tensor %580, %583, %584 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%586 = torch.aten.dequantize.self %585 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%587 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%588 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_117 = torch.constant.int 12
%589 = torch.aten.item %587 : !torch.vtensor<[],f32> -> !torch.float
%590 = torch.aten.item %588 : !torch.vtensor<[],si8> -> !torch.int
%591 = torch.aten.quantize_per_tensor %12, %589, %590, %int12_117 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%592 = torch.aten.int_repr %591 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%593 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%594 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%595 = torch.aten.item %593 : !torch.vtensor<[],f32> -> !torch.float
%596 = torch.aten.item %594 : !torch.vtensor<[],si8> -> !torch.int
%597 = torch.aten._make_per_tensor_quantized_tensor %592, %595, %596 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%598 = torch.aten.dequantize.self %597 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%599 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%600 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_118 = torch.constant.int 12
%601 = torch.aten.item %599 : !torch.vtensor<[],f32> -> !torch.float
%602 = torch.aten.item %600 : !torch.vtensor<[],si8> -> !torch.int
%603 = torch.aten.quantize_per_tensor %13, %601, %602, %int12_118 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%604 = torch.aten.int_repr %603 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%605 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%606 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%607 = torch.aten.item %605 : !torch.vtensor<[],f32> -> !torch.float
%608 = torch.aten.item %606 : !torch.vtensor<[],si8> -> !torch.int
%609 = torch.aten._make_per_tensor_quantized_tensor %604, %607, %608 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%610 = torch.aten.dequantize.self %609 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_119 = torch.constant.int 1
%int1_120 = torch.constant.int 1
%int1_121 = torch.constant.int 1
%int1_122 = torch.constant.int 1
%int1_123 = torch.constant.int 1
%int1_124 = torch.constant.int 1
%int0_125 = torch.constant.int 0
%611 = torch.prim.ListConstruct %int1_119, %int1_120 : (!torch.int, !torch.int) -> !torch.list<int>
%612 = torch.prim.ListConstruct %int1_121, %int1_122 : (!torch.int, !torch.int) -> !torch.list<int>
%613 = torch.prim.ListConstruct %int1_123, %int1_124 : (!torch.int, !torch.int) -> !torch.list<int>
%614 = torch.prim.ListConstruct %int0_125, %int0_125 : (!torch.int, !torch.int) -> !torch.list<int>
%false_126 = torch.constant.bool false
%int1_127 = torch.constant.int 1
%615 = torch.aten.convolution %586, %598, %610, %613, %611, %612, %false_126, %614, %int1_127 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%616 = torch.aten.relu %615 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%617 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%618 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_128 = torch.constant.int 12
%619 = torch.aten.item %617 : !torch.vtensor<[],f32> -> !torch.float
%620 = torch.aten.item %618 : !torch.vtensor<[],si8> -> !torch.int
%621 = torch.aten.quantize_per_tensor %616, %619, %620, %int12_128 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%622 = torch.aten.int_repr %621 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%623 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%624 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%625 = torch.aten.item %623 : !torch.vtensor<[],f32> -> !torch.float
%626 = torch.aten.item %624 : !torch.vtensor<[],si8> -> !torch.int
%627 = torch.aten._make_per_tensor_quantized_tensor %622, %625, %626 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%628 = torch.aten.dequantize.self %627 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%629 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%630 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_129 = torch.constant.int 12
%631 = torch.aten.item %629 : !torch.vtensor<[],f32> -> !torch.float
%632 = torch.aten.item %630 : !torch.vtensor<[],si8> -> !torch.int
%633 = torch.aten.quantize_per_tensor %14, %631, %632, %int12_129 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%634 = torch.aten.int_repr %633 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%635 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%636 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%637 = torch.aten.item %635 : !torch.vtensor<[],f32> -> !torch.float
%638 = torch.aten.item %636 : !torch.vtensor<[],si8> -> !torch.int
%639 = torch.aten._make_per_tensor_quantized_tensor %634, %637, %638 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%640 = torch.aten.dequantize.self %639 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%641 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%642 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_130 = torch.constant.int 12
%643 = torch.aten.item %641 : !torch.vtensor<[],f32> -> !torch.float
%644 = torch.aten.item %642 : !torch.vtensor<[],si8> -> !torch.int
%645 = torch.aten.quantize_per_tensor %15, %643, %644, %int12_130 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%646 = torch.aten.int_repr %645 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%647 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%648 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%649 = torch.aten.item %647 : !torch.vtensor<[],f32> -> !torch.float
%650 = torch.aten.item %648 : !torch.vtensor<[],si8> -> !torch.int
%651 = torch.aten._make_per_tensor_quantized_tensor %646, %649, %650 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%652 = torch.aten.dequantize.self %651 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int2_131 = torch.constant.int 2
%int2_132 = torch.constant.int 2
%int2_133 = torch.constant.int 2
%int2_134 = torch.constant.int 2
%int1_135 = torch.constant.int 1
%int1_136 = torch.constant.int 1
%int0_137 = torch.constant.int 0
%653 = torch.prim.ListConstruct %int2_131, %int2_132 : (!torch.int, !torch.int) -> !torch.list<int>
%654 = torch.prim.ListConstruct %int2_133, %int2_134 : (!torch.int, !torch.int) -> !torch.list<int>
%655 = torch.prim.ListConstruct %int1_135, %int1_136 : (!torch.int, !torch.int) -> !torch.list<int>
%656 = torch.prim.ListConstruct %int0_137, %int0_137 : (!torch.int, !torch.int) -> !torch.list<int>
%false_138 = torch.constant.bool false
%int1_139 = torch.constant.int 1
%657 = torch.aten.convolution %628, %640, %652, %655, %653, %654, %false_138, %656, %int1_139 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%658 = torch.aten.relu %657 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%659 = torch.prim.ListConstruct %658, %628 : (!torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[1,32,10,10],f32>) -> !torch.list<vtensor>
%int1_140 = torch.constant.int 1
%660 = torch.aten.cat %659, %int1_140 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%661 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%662 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_141 = torch.constant.int 12
%663 = torch.aten.item %661 : !torch.vtensor<[],f32> -> !torch.float
%664 = torch.aten.item %662 : !torch.vtensor<[],si8> -> !torch.int
%665 = torch.aten.quantize_per_tensor %660, %663, %664, %int12_141 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%666 = torch.aten.int_repr %665 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%667 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%668 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%669 = torch.aten.item %667 : !torch.vtensor<[],f32> -> !torch.float
%670 = torch.aten.item %668 : !torch.vtensor<[],si8> -> !torch.int
%671 = torch.aten._make_per_tensor_quantized_tensor %666, %669, %670 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%672 = torch.aten.dequantize.self %671 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%673 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%674 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_142 = torch.constant.int 12
%675 = torch.aten.item %673 : !torch.vtensor<[],f32> -> !torch.float
%676 = torch.aten.item %674 : !torch.vtensor<[],si8> -> !torch.int
%677 = torch.aten.quantize_per_tensor %16, %675, %676, %int12_142 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%678 = torch.aten.int_repr %677 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%679 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%680 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%681 = torch.aten.item %679 : !torch.vtensor<[],f32> -> !torch.float
%682 = torch.aten.item %680 : !torch.vtensor<[],si8> -> !torch.int
%683 = torch.aten._make_per_tensor_quantized_tensor %678, %681, %682 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%684 = torch.aten.dequantize.self %683 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%685 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%686 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_143 = torch.constant.int 12
%687 = torch.aten.item %685 : !torch.vtensor<[],f32> -> !torch.float
%688 = torch.aten.item %686 : !torch.vtensor<[],si8> -> !torch.int
%689 = torch.aten.quantize_per_tensor %17, %687, %688, %int12_143 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%690 = torch.aten.int_repr %689 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%691 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%692 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%693 = torch.aten.item %691 : !torch.vtensor<[],f32> -> !torch.float
%694 = torch.aten.item %692 : !torch.vtensor<[],si8> -> !torch.int
%695 = torch.aten._make_per_tensor_quantized_tensor %690, %693, %694 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%696 = torch.aten.dequantize.self %695 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_144 = torch.constant.int 1
%int1_145 = torch.constant.int 1
%int1_146 = torch.constant.int 1
%int1_147 = torch.constant.int 1
%int1_148 = torch.constant.int 1
%int1_149 = torch.constant.int 1
%int0_150 = torch.constant.int 0
%697 = torch.prim.ListConstruct %int1_144, %int1_145 : (!torch.int, !torch.int) -> !torch.list<int>
%698 = torch.prim.ListConstruct %int1_146, %int1_147 : (!torch.int, !torch.int) -> !torch.list<int>
%699 = torch.prim.ListConstruct %int1_148, %int1_149 : (!torch.int, !torch.int) -> !torch.list<int>
%700 = torch.prim.ListConstruct %int0_150, %int0_150 : (!torch.int, !torch.int) -> !torch.list<int>
%false_151 = torch.constant.bool false
%int1_152 = torch.constant.int 1
%701 = torch.aten.convolution %672, %684, %696, %699, %697, %698, %false_151, %700, %int1_152 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%702 = torch.aten.relu %701 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%703 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%704 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_153 = torch.constant.int 12
%705 = torch.aten.item %703 : !torch.vtensor<[],f32> -> !torch.float
%706 = torch.aten.item %704 : !torch.vtensor<[],si8> -> !torch.int
%707 = torch.aten.quantize_per_tensor %702, %705, %706, %int12_153 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%708 = torch.aten.int_repr %707 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%709 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%710 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%711 = torch.aten.item %709 : !torch.vtensor<[],f32> -> !torch.float
%712 = torch.aten.item %710 : !torch.vtensor<[],si8> -> !torch.int
%713 = torch.aten._make_per_tensor_quantized_tensor %708, %711, %712 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%714 = torch.aten.dequantize.self %713 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%715 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%716 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%717 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_154 = torch.constant.int 0
%int0_155 = torch.constant.int 0
%int0_156 = torch.constant.int 0
%718 = torch.aten.select.int %717, %int0_154, %int0_156 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%719 = torch.aten.item %718 : !torch.vtensor<[1],si64> -> !torch.int
%720 = torch.aten.lt.int %719, %int0_154 : !torch.int, !torch.int -> !torch.bool
%721 = torch.aten.Int.bool %720 : !torch.bool -> !torch.int
%722 = torch.aten.mul.int %721, %int0_155 : !torch.int, !torch.int -> !torch.int
%723 = torch.aten.add.int %719, %722 : !torch.int, !torch.int -> !torch.int
%724 = torch.prim.ListConstruct %723 : (!torch.int) -> !torch.list<int>
%false_157 = torch.constant.bool false
%none_158 = torch.constant.none
%725 = torch.aten.tensor %724, %none_158, %none_158, %false_157 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values, %indices = torch.aten.sort %725, %int0_154, %false_157 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_159 = torch.constant.int 0
%726 = torch.aten.select.int %values, %int0_154, %int0_159 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%727 = torch.aten.item %726 : !torch.vtensor<[1],si64> -> !torch.int
%728 = torch.aten.unsqueeze %715, %727 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%729 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_160 = torch.constant.int 0
%int0_161 = torch.constant.int 0
%int0_162 = torch.constant.int 0
%730 = torch.aten.select.int %729, %int0_160, %int0_162 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%731 = torch.aten.item %730 : !torch.vtensor<[1],si64> -> !torch.int
%732 = torch.aten.lt.int %731, %int0_160 : !torch.int, !torch.int -> !torch.bool
%733 = torch.aten.Int.bool %732 : !torch.bool -> !torch.int
%734 = torch.aten.mul.int %733, %int0_161 : !torch.int, !torch.int -> !torch.int
%735 = torch.aten.add.int %731, %734 : !torch.int, !torch.int -> !torch.int
%736 = torch.prim.ListConstruct %735 : (!torch.int) -> !torch.list<int>
%false_163 = torch.constant.bool false
%none_164 = torch.constant.none
%737 = torch.aten.tensor %736, %none_164, %none_164, %false_163 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_165, %indices_166 = torch.aten.sort %737, %int0_160, %false_163 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_167 = torch.constant.int 0
%738 = torch.aten.select.int %values_165, %int0_160, %int0_167 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%739 = torch.aten.item %738 : !torch.vtensor<[1],si64> -> !torch.int
%740 = torch.aten.unsqueeze %716, %739 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%741 = torch.prim.ListConstruct %728, %740 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_168 = torch.constant.int 0
%742 = torch.aten.cat %741, %int0_168 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%743 = torch.aten._shape_as_tensor %714 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[4],si64>
%744 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%745 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%746 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_169 = torch.constant.none
%int1_170 = torch.constant.int 1
%747 = torch.prim.ListConstruct %int1_170 : (!torch.int) -> !torch.list<int>
%748 = torch.aten.ones %747, %none_169, %none_169, %none_169, %none_169 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_171 = torch.constant.int 0
%int0_172 = torch.constant.int 0
%749 = torch.prim.NumToTensor.Scalar %int0_172 : !torch.int -> !torch.vtensor<[1],si64>
%750 = torch.aten.index_select %745, %int0_171, %749 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%751 = torch.aten.item %750 : !torch.vtensor<[1],si64> -> !torch.int
%752 = torch.aten.index_select %746, %int0_171, %749 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%753 = torch.aten.item %752 : !torch.vtensor<[1],si64> -> !torch.int
%754 = torch.aten.index_select %744, %int0_171, %749 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%755 = torch.aten.item %754 : !torch.vtensor<[1],si64> -> !torch.int
%756 = torch.aten.index_select %748, %int0_171, %749 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%757 = torch.aten.item %756 : !torch.vtensor<[1],si64> -> !torch.int
%758 = torch.aten.slice.Tensor %743, %755, %751, %753, %757 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4 = torch.constant.int 4
%none_173 = torch.constant.none
%false_174 = torch.constant.bool false
%759 = torch.aten.to.dtype %742, %int4, %false_174, %false_174, %none_173 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%760 = torch.prim.ListConstruct %758, %759 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_175 = torch.constant.int 0
%761 = torch.aten.cat %760, %int0_175 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%762 = torch.operator "onnx.Resize"(%714, %none, %none, %761) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%763 = torch.prim.ListConstruct %762, %569 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,20,20],f32>) -> !torch.list<vtensor>
%int1_176 = torch.constant.int 1
%764 = torch.aten.cat %763, %int1_176 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%765 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%766 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_177 = torch.constant.int 12
%767 = torch.aten.item %765 : !torch.vtensor<[],f32> -> !torch.float
%768 = torch.aten.item %766 : !torch.vtensor<[],si8> -> !torch.int
%769 = torch.aten.quantize_per_tensor %764, %767, %768, %int12_177 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%770 = torch.aten.int_repr %769 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%771 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%772 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%773 = torch.aten.item %771 : !torch.vtensor<[],f32> -> !torch.float
%774 = torch.aten.item %772 : !torch.vtensor<[],si8> -> !torch.int
%775 = torch.aten._make_per_tensor_quantized_tensor %770, %773, %774 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%776 = torch.aten.dequantize.self %775 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%777 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%778 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_178 = torch.constant.int 12
%779 = torch.aten.item %777 : !torch.vtensor<[],f32> -> !torch.float
%780 = torch.aten.item %778 : !torch.vtensor<[],si8> -> !torch.int
%781 = torch.aten.quantize_per_tensor %18, %779, %780, %int12_178 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%782 = torch.aten.int_repr %781 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%783 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%784 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%785 = torch.aten.item %783 : !torch.vtensor<[],f32> -> !torch.float
%786 = torch.aten.item %784 : !torch.vtensor<[],si8> -> !torch.int
%787 = torch.aten._make_per_tensor_quantized_tensor %782, %785, %786 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%788 = torch.aten.dequantize.self %787 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%789 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%790 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_179 = torch.constant.int 12
%791 = torch.aten.item %789 : !torch.vtensor<[],f32> -> !torch.float
%792 = torch.aten.item %790 : !torch.vtensor<[],si8> -> !torch.int
%793 = torch.aten.quantize_per_tensor %19, %791, %792, %int12_179 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%794 = torch.aten.int_repr %793 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%795 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%796 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%797 = torch.aten.item %795 : !torch.vtensor<[],f32> -> !torch.float
%798 = torch.aten.item %796 : !torch.vtensor<[],si8> -> !torch.int
%799 = torch.aten._make_per_tensor_quantized_tensor %794, %797, %798 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%800 = torch.aten.dequantize.self %799 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_180 = torch.constant.int 1
%int1_181 = torch.constant.int 1
%int1_182 = torch.constant.int 1
%int1_183 = torch.constant.int 1
%int1_184 = torch.constant.int 1
%int1_185 = torch.constant.int 1
%int0_186 = torch.constant.int 0
%801 = torch.prim.ListConstruct %int1_180, %int1_181 : (!torch.int, !torch.int) -> !torch.list<int>
%802 = torch.prim.ListConstruct %int1_182, %int1_183 : (!torch.int, !torch.int) -> !torch.list<int>
%803 = torch.prim.ListConstruct %int1_184, %int1_185 : (!torch.int, !torch.int) -> !torch.list<int>
%804 = torch.prim.ListConstruct %int0_186, %int0_186 : (!torch.int, !torch.int) -> !torch.list<int>
%false_187 = torch.constant.bool false
%int1_188 = torch.constant.int 1
%805 = torch.aten.convolution %776, %788, %800, %803, %801, %802, %false_187, %804, %int1_188 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%806 = torch.aten.relu %805 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%807 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%808 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_189 = torch.constant.int 12
%809 = torch.aten.item %807 : !torch.vtensor<[],f32> -> !torch.float
%810 = torch.aten.item %808 : !torch.vtensor<[],si8> -> !torch.int
%811 = torch.aten.quantize_per_tensor %806, %809, %810, %int12_189 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%812 = torch.aten.int_repr %811 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%813 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%814 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%815 = torch.aten.item %813 : !torch.vtensor<[],f32> -> !torch.float
%816 = torch.aten.item %814 : !torch.vtensor<[],si8> -> !torch.int
%817 = torch.aten._make_per_tensor_quantized_tensor %812, %815, %816 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%818 = torch.aten.dequantize.self %817 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%819 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%820 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%821 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_190 = torch.constant.int 0
%int0_191 = torch.constant.int 0
%int0_192 = torch.constant.int 0
%822 = torch.aten.select.int %821, %int0_190, %int0_192 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%823 = torch.aten.item %822 : !torch.vtensor<[1],si64> -> !torch.int
%824 = torch.aten.lt.int %823, %int0_190 : !torch.int, !torch.int -> !torch.bool
%825 = torch.aten.Int.bool %824 : !torch.bool -> !torch.int
%826 = torch.aten.mul.int %825, %int0_191 : !torch.int, !torch.int -> !torch.int
%827 = torch.aten.add.int %823, %826 : !torch.int, !torch.int -> !torch.int
%828 = torch.prim.ListConstruct %827 : (!torch.int) -> !torch.list<int>
%false_193 = torch.constant.bool false
%none_194 = torch.constant.none
%829 = torch.aten.tensor %828, %none_194, %none_194, %false_193 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_195, %indices_196 = torch.aten.sort %829, %int0_190, %false_193 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_197 = torch.constant.int 0
%830 = torch.aten.select.int %values_195, %int0_190, %int0_197 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%831 = torch.aten.item %830 : !torch.vtensor<[1],si64> -> !torch.int
%832 = torch.aten.unsqueeze %819, %831 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%833 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_198 = torch.constant.int 0
%int0_199 = torch.constant.int 0
%int0_200 = torch.constant.int 0
%834 = torch.aten.select.int %833, %int0_198, %int0_200 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%835 = torch.aten.item %834 : !torch.vtensor<[1],si64> -> !torch.int
%836 = torch.aten.lt.int %835, %int0_198 : !torch.int, !torch.int -> !torch.bool
%837 = torch.aten.Int.bool %836 : !torch.bool -> !torch.int
%838 = torch.aten.mul.int %837, %int0_199 : !torch.int, !torch.int -> !torch.int
%839 = torch.aten.add.int %835, %838 : !torch.int, !torch.int -> !torch.int
%840 = torch.prim.ListConstruct %839 : (!torch.int) -> !torch.list<int>
%false_201 = torch.constant.bool false
%none_202 = torch.constant.none
%841 = torch.aten.tensor %840, %none_202, %none_202, %false_201 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_203, %indices_204 = torch.aten.sort %841, %int0_198, %false_201 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_205 = torch.constant.int 0
%842 = torch.aten.select.int %values_203, %int0_198, %int0_205 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%843 = torch.aten.item %842 : !torch.vtensor<[1],si64> -> !torch.int
%844 = torch.aten.unsqueeze %820, %843 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%845 = torch.prim.ListConstruct %832, %844 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_206 = torch.constant.int 0
%846 = torch.aten.cat %845, %int0_206 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%847 = torch.aten._shape_as_tensor %818 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[4],si64>
%848 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%849 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%850 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_207 = torch.constant.none
%int1_208 = torch.constant.int 1
%851 = torch.prim.ListConstruct %int1_208 : (!torch.int) -> !torch.list<int>
%852 = torch.aten.ones %851, %none_207, %none_207, %none_207, %none_207 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_209 = torch.constant.int 0
%int0_210 = torch.constant.int 0
%853 = torch.prim.NumToTensor.Scalar %int0_210 : !torch.int -> !torch.vtensor<[1],si64>
%854 = torch.aten.index_select %849, %int0_209, %853 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%855 = torch.aten.item %854 : !torch.vtensor<[1],si64> -> !torch.int
%856 = torch.aten.index_select %850, %int0_209, %853 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%857 = torch.aten.item %856 : !torch.vtensor<[1],si64> -> !torch.int
%858 = torch.aten.index_select %848, %int0_209, %853 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%859 = torch.aten.item %858 : !torch.vtensor<[1],si64> -> !torch.int
%860 = torch.aten.index_select %852, %int0_209, %853 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%861 = torch.aten.item %860 : !torch.vtensor<[1],si64> -> !torch.int
%862 = torch.aten.slice.Tensor %847, %859, %855, %857, %861 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_211 = torch.constant.int 4
%none_212 = torch.constant.none
%false_213 = torch.constant.bool false
%863 = torch.aten.to.dtype %846, %int4_211, %false_213, %false_213, %none_212 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%864 = torch.prim.ListConstruct %862, %863 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_214 = torch.constant.int 0
%865 = torch.aten.cat %864, %int0_214 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%866 = torch.operator "onnx.Resize"(%818, %none, %none, %865) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%867 = torch.prim.ListConstruct %866, %510 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,40,40],f32>) -> !torch.list<vtensor>
%int1_215 = torch.constant.int 1
%868 = torch.aten.cat %867, %int1_215 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%869 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%870 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_216 = torch.constant.int 12
%871 = torch.aten.item %869 : !torch.vtensor<[],f32> -> !torch.float
%872 = torch.aten.item %870 : !torch.vtensor<[],si8> -> !torch.int
%873 = torch.aten.quantize_per_tensor %868, %871, %872, %int12_216 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%874 = torch.aten.int_repr %873 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%875 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%876 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%877 = torch.aten.item %875 : !torch.vtensor<[],f32> -> !torch.float
%878 = torch.aten.item %876 : !torch.vtensor<[],si8> -> !torch.int
%879 = torch.aten._make_per_tensor_quantized_tensor %874, %877, %878 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%880 = torch.aten.dequantize.self %879 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%881 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%882 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_217 = torch.constant.int 12
%883 = torch.aten.item %881 : !torch.vtensor<[],f32> -> !torch.float
%884 = torch.aten.item %882 : !torch.vtensor<[],si8> -> !torch.int
%885 = torch.aten.quantize_per_tensor %20, %883, %884, %int12_217 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%886 = torch.aten.int_repr %885 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%887 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%888 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%889 = torch.aten.item %887 : !torch.vtensor<[],f32> -> !torch.float
%890 = torch.aten.item %888 : !torch.vtensor<[],si8> -> !torch.int
%891 = torch.aten._make_per_tensor_quantized_tensor %886, %889, %890 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%892 = torch.aten.dequantize.self %891 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%893 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%894 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_218 = torch.constant.int 12
%895 = torch.aten.item %893 : !torch.vtensor<[],f32> -> !torch.float
%896 = torch.aten.item %894 : !torch.vtensor<[],si8> -> !torch.int
%897 = torch.aten.quantize_per_tensor %21, %895, %896, %int12_218 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%898 = torch.aten.int_repr %897 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%899 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%900 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%901 = torch.aten.item %899 : !torch.vtensor<[],f32> -> !torch.float
%902 = torch.aten.item %900 : !torch.vtensor<[],si8> -> !torch.int
%903 = torch.aten._make_per_tensor_quantized_tensor %898, %901, %902 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%904 = torch.aten.dequantize.self %903 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_219 = torch.constant.int 1
%int1_220 = torch.constant.int 1
%int1_221 = torch.constant.int 1
%int1_222 = torch.constant.int 1
%int1_223 = torch.constant.int 1
%int1_224 = torch.constant.int 1
%int0_225 = torch.constant.int 0
%905 = torch.prim.ListConstruct %int1_219, %int1_220 : (!torch.int, !torch.int) -> !torch.list<int>
%906 = torch.prim.ListConstruct %int1_221, %int1_222 : (!torch.int, !torch.int) -> !torch.list<int>
%907 = torch.prim.ListConstruct %int1_223, %int1_224 : (!torch.int, !torch.int) -> !torch.list<int>
%908 = torch.prim.ListConstruct %int0_225, %int0_225 : (!torch.int, !torch.int) -> !torch.list<int>
%false_226 = torch.constant.bool false
%int1_227 = torch.constant.int 1
%909 = torch.aten.convolution %880, %892, %904, %907, %905, %906, %false_226, %908, %int1_227 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%910 = torch.aten.relu %909 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%911 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_228 = torch.constant.int 12
%913 = torch.aten.item %911 : !torch.vtensor<[],f32> -> !torch.float
%914 = torch.aten.item %912 : !torch.vtensor<[],si8> -> !torch.int
%915 = torch.aten.quantize_per_tensor %910, %913, %914, %int12_228 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%916 = torch.aten.int_repr %915 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%917 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%918 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%919 = torch.aten.item %917 : !torch.vtensor<[],f32> -> !torch.float
%920 = torch.aten.item %918 : !torch.vtensor<[],si8> -> !torch.int
%921 = torch.aten._make_per_tensor_quantized_tensor %916, %919, %920 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%922 = torch.aten.dequantize.self %921 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%923 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%924 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%925 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_229 = torch.constant.int 0
%int0_230 = torch.constant.int 0
%int0_231 = torch.constant.int 0
%926 = torch.aten.select.int %925, %int0_229, %int0_231 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%927 = torch.aten.item %926 : !torch.vtensor<[1],si64> -> !torch.int
%928 = torch.aten.lt.int %927, %int0_229 : !torch.int, !torch.int -> !torch.bool
%929 = torch.aten.Int.bool %928 : !torch.bool -> !torch.int
%930 = torch.aten.mul.int %929, %int0_230 : !torch.int, !torch.int -> !torch.int
%931 = torch.aten.add.int %927, %930 : !torch.int, !torch.int -> !torch.int
%932 = torch.prim.ListConstruct %931 : (!torch.int) -> !torch.list<int>
%false_232 = torch.constant.bool false
%none_233 = torch.constant.none
%933 = torch.aten.tensor %932, %none_233, %none_233, %false_232 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_234, %indices_235 = torch.aten.sort %933, %int0_229, %false_232 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_236 = torch.constant.int 0
%934 = torch.aten.select.int %values_234, %int0_229, %int0_236 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%935 = torch.aten.item %934 : !torch.vtensor<[1],si64> -> !torch.int
%936 = torch.aten.unsqueeze %923, %935 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%937 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_237 = torch.constant.int 0
%int0_238 = torch.constant.int 0
%int0_239 = torch.constant.int 0
%938 = torch.aten.select.int %937, %int0_237, %int0_239 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%939 = torch.aten.item %938 : !torch.vtensor<[1],si64> -> !torch.int
%940 = torch.aten.lt.int %939, %int0_237 : !torch.int, !torch.int -> !torch.bool
%941 = torch.aten.Int.bool %940 : !torch.bool -> !torch.int
%942 = torch.aten.mul.int %941, %int0_238 : !torch.int, !torch.int -> !torch.int
%943 = torch.aten.add.int %939, %942 : !torch.int, !torch.int -> !torch.int
%944 = torch.prim.ListConstruct %943 : (!torch.int) -> !torch.list<int>
%false_240 = torch.constant.bool false
%none_241 = torch.constant.none
%945 = torch.aten.tensor %944, %none_241, %none_241, %false_240 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_242, %indices_243 = torch.aten.sort %945, %int0_237, %false_240 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_244 = torch.constant.int 0
%946 = torch.aten.select.int %values_242, %int0_237, %int0_244 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%947 = torch.aten.item %946 : !torch.vtensor<[1],si64> -> !torch.int
%948 = torch.aten.unsqueeze %924, %947 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%949 = torch.prim.ListConstruct %936, %948 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_245 = torch.constant.int 0
%950 = torch.aten.cat %949, %int0_245 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%951 = torch.aten._shape_as_tensor %922 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[4],si64>
%952 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%953 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%954 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_246 = torch.constant.none
%int1_247 = torch.constant.int 1
%955 = torch.prim.ListConstruct %int1_247 : (!torch.int) -> !torch.list<int>
%956 = torch.aten.ones %955, %none_246, %none_246, %none_246, %none_246 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_248 = torch.constant.int 0
%int0_249 = torch.constant.int 0
%957 = torch.prim.NumToTensor.Scalar %int0_249 : !torch.int -> !torch.vtensor<[1],si64>
%958 = torch.aten.index_select %953, %int0_248, %957 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%959 = torch.aten.item %958 : !torch.vtensor<[1],si64> -> !torch.int
%960 = torch.aten.index_select %954, %int0_248, %957 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%961 = torch.aten.item %960 : !torch.vtensor<[1],si64> -> !torch.int
%962 = torch.aten.index_select %952, %int0_248, %957 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%963 = torch.aten.item %962 : !torch.vtensor<[1],si64> -> !torch.int
%964 = torch.aten.index_select %956, %int0_248, %957 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%965 = torch.aten.item %964 : !torch.vtensor<[1],si64> -> !torch.int
%966 = torch.aten.slice.Tensor %951, %963, %959, %961, %965 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_250 = torch.constant.int 4
%none_251 = torch.constant.none
%false_252 = torch.constant.bool false
%967 = torch.aten.to.dtype %950, %int4_250, %false_252, %false_252, %none_251 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%968 = torch.prim.ListConstruct %966, %967 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_253 = torch.constant.int 0
%969 = torch.aten.cat %968, %int0_253 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%970 = torch.operator "onnx.Resize"(%922, %none, %none, %969) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%971 = torch.prim.ListConstruct %970, %451 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,80,80],f32>) -> !torch.list<vtensor>
%int1_254 = torch.constant.int 1
%972 = torch.aten.cat %971, %int1_254 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%973 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%974 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_255 = torch.constant.int 12
%975 = torch.aten.item %973 : !torch.vtensor<[],f32> -> !torch.float
%976 = torch.aten.item %974 : !torch.vtensor<[],si8> -> !torch.int
%977 = torch.aten.quantize_per_tensor %972, %975, %976, %int12_255 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%978 = torch.aten.int_repr %977 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%979 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%980 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%981 = torch.aten.item %979 : !torch.vtensor<[],f32> -> !torch.float
%982 = torch.aten.item %980 : !torch.vtensor<[],si8> -> !torch.int
%983 = torch.aten._make_per_tensor_quantized_tensor %978, %981, %982 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%984 = torch.aten.dequantize.self %983 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%985 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%986 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_256 = torch.constant.int 12
%987 = torch.aten.item %985 : !torch.vtensor<[],f32> -> !torch.float
%988 = torch.aten.item %986 : !torch.vtensor<[],si8> -> !torch.int
%989 = torch.aten.quantize_per_tensor %22, %987, %988, %int12_256 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%990 = torch.aten.int_repr %989 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%991 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%992 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%993 = torch.aten.item %991 : !torch.vtensor<[],f32> -> !torch.float
%994 = torch.aten.item %992 : !torch.vtensor<[],si8> -> !torch.int
%995 = torch.aten._make_per_tensor_quantized_tensor %990, %993, %994 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%996 = torch.aten.dequantize.self %995 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%997 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%998 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_257 = torch.constant.int 12
%999 = torch.aten.item %997 : !torch.vtensor<[],f32> -> !torch.float
%1000 = torch.aten.item %998 : !torch.vtensor<[],si8> -> !torch.int
%1001 = torch.aten.quantize_per_tensor %23, %999, %1000, %int12_257 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1002 = torch.aten.int_repr %1001 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1003 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1004 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1005 = torch.aten.item %1003 : !torch.vtensor<[],f32> -> !torch.float
%1006 = torch.aten.item %1004 : !torch.vtensor<[],si8> -> !torch.int
%1007 = torch.aten._make_per_tensor_quantized_tensor %1002, %1005, %1006 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1008 = torch.aten.dequantize.self %1007 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_258 = torch.constant.int 1
%int1_259 = torch.constant.int 1
%int1_260 = torch.constant.int 1
%int1_261 = torch.constant.int 1
%int1_262 = torch.constant.int 1
%int1_263 = torch.constant.int 1
%int0_264 = torch.constant.int 0
%1009 = torch.prim.ListConstruct %int1_258, %int1_259 : (!torch.int, !torch.int) -> !torch.list<int>
%1010 = torch.prim.ListConstruct %int1_260, %int1_261 : (!torch.int, !torch.int) -> !torch.list<int>
%1011 = torch.prim.ListConstruct %int1_262, %int1_263 : (!torch.int, !torch.int) -> !torch.list<int>
%1012 = torch.prim.ListConstruct %int0_264, %int0_264 : (!torch.int, !torch.int) -> !torch.list<int>
%false_265 = torch.constant.bool false
%int1_266 = torch.constant.int 1
%1013 = torch.aten.convolution %984, %996, %1008, %1011, %1009, %1010, %false_265, %1012, %int1_266 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%1014 = torch.aten.relu %1013 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%1015 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1016 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_267 = torch.constant.int 12
%1017 = torch.aten.item %1015 : !torch.vtensor<[],f32> -> !torch.float
%1018 = torch.aten.item %1016 : !torch.vtensor<[],si8> -> !torch.int
%1019 = torch.aten.quantize_per_tensor %1014, %1017, %1018, %int12_267 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1020 = torch.aten.int_repr %1019 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%1021 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1022 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1023 = torch.aten.item %1021 : !torch.vtensor<[],f32> -> !torch.float
%1024 = torch.aten.item %1022 : !torch.vtensor<[],si8> -> !torch.int
%1025 = torch.aten._make_per_tensor_quantized_tensor %1020, %1023, %1024 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1026 = torch.aten.dequantize.self %1025 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%1027 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%1028 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%1029 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_268 = torch.constant.int 0
%int0_269 = torch.constant.int 0
%int0_270 = torch.constant.int 0
%1030 = torch.aten.select.int %1029, %int0_268, %int0_270 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1031 = torch.aten.item %1030 : !torch.vtensor<[1],si64> -> !torch.int
%1032 = torch.aten.lt.int %1031, %int0_268 : !torch.int, !torch.int -> !torch.bool
%1033 = torch.aten.Int.bool %1032 : !torch.bool -> !torch.int
%1034 = torch.aten.mul.int %1033, %int0_269 : !torch.int, !torch.int -> !torch.int
%1035 = torch.aten.add.int %1031, %1034 : !torch.int, !torch.int -> !torch.int
%1036 = torch.prim.ListConstruct %1035 : (!torch.int) -> !torch.list<int>
%false_271 = torch.constant.bool false
%none_272 = torch.constant.none
%1037 = torch.aten.tensor %1036, %none_272, %none_272, %false_271 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_273, %indices_274 = torch.aten.sort %1037, %int0_268, %false_271 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_275 = torch.constant.int 0
%1038 = torch.aten.select.int %values_273, %int0_268, %int0_275 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1039 = torch.aten.item %1038 : !torch.vtensor<[1],si64> -> !torch.int
%1040 = torch.aten.unsqueeze %1027, %1039 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1041 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_276 = torch.constant.int 0
%int0_277 = torch.constant.int 0
%int0_278 = torch.constant.int 0
%1042 = torch.aten.select.int %1041, %int0_276, %int0_278 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1043 = torch.aten.item %1042 : !torch.vtensor<[1],si64> -> !torch.int
%1044 = torch.aten.lt.int %1043, %int0_276 : !torch.int, !torch.int -> !torch.bool
%1045 = torch.aten.Int.bool %1044 : !torch.bool -> !torch.int
%1046 = torch.aten.mul.int %1045, %int0_277 : !torch.int, !torch.int -> !torch.int
%1047 = torch.aten.add.int %1043, %1046 : !torch.int, !torch.int -> !torch.int
%1048 = torch.prim.ListConstruct %1047 : (!torch.int) -> !torch.list<int>
%false_279 = torch.constant.bool false
%none_280 = torch.constant.none
%1049 = torch.aten.tensor %1048, %none_280, %none_280, %false_279 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_281, %indices_282 = torch.aten.sort %1049, %int0_276, %false_279 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_283 = torch.constant.int 0
%1050 = torch.aten.select.int %values_281, %int0_276, %int0_283 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1051 = torch.aten.item %1050 : !torch.vtensor<[1],si64> -> !torch.int
%1052 = torch.aten.unsqueeze %1028, %1051 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1053 = torch.prim.ListConstruct %1040, %1052 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_284 = torch.constant.int 0
%1054 = torch.aten.cat %1053, %int0_284 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%1055 = torch.aten._shape_as_tensor %1026 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[4],si64>
%1056 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1057 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1058 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_285 = torch.constant.none
%int1_286 = torch.constant.int 1
%1059 = torch.prim.ListConstruct %int1_286 : (!torch.int) -> !torch.list<int>
%1060 = torch.aten.ones %1059, %none_285, %none_285, %none_285, %none_285 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_287 = torch.constant.int 0
%int0_288 = torch.constant.int 0
%1061 = torch.prim.NumToTensor.Scalar %int0_288 : !torch.int -> !torch.vtensor<[1],si64>
%1062 = torch.aten.index_select %1057, %int0_287, %1061 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1063 = torch.aten.item %1062 : !torch.vtensor<[1],si64> -> !torch.int
%1064 = torch.aten.index_select %1058, %int0_287, %1061 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1065 = torch.aten.item %1064 : !torch.vtensor<[1],si64> -> !torch.int
%1066 = torch.aten.index_select %1056, %int0_287, %1061 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1067 = torch.aten.item %1066 : !torch.vtensor<[1],si64> -> !torch.int
%1068 = torch.aten.index_select %1060, %int0_287, %1061 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1069 = torch.aten.item %1068 : !torch.vtensor<[1],si64> -> !torch.int
%1070 = torch.aten.slice.Tensor %1055, %1067, %1063, %1065, %1069 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_289 = torch.constant.int 4
%none_290 = torch.constant.none
%false_291 = torch.constant.bool false
%1071 = torch.aten.to.dtype %1054, %int4_289, %false_291, %false_291, %none_290 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%1072 = torch.prim.ListConstruct %1070, %1071 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_292 = torch.constant.int 0
%1073 = torch.aten.cat %1072, %int0_292 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%1074 = torch.operator "onnx.Resize"(%1026, %none, %none, %1073) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,80,80],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%1075 = torch.prim.ListConstruct %1074, %392 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,160,160],f32>) -> !torch.list<vtensor>
%int1_293 = torch.constant.int 1
%1076 = torch.aten.cat %1075, %int1_293 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,160,160],f32>
%1077 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1078 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_294 = torch.constant.int 12
%1079 = torch.aten.item %1077 : !torch.vtensor<[],f32> -> !torch.float
%1080 = torch.aten.item %1078 : !torch.vtensor<[],si8> -> !torch.int
%1081 = torch.aten.quantize_per_tensor %1076, %1079, %1080, %int12_294 : !torch.vtensor<[1,?,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%1082 = torch.aten.int_repr %1081 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],si8>
%1083 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1084 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1085 = torch.aten.item %1083 : !torch.vtensor<[],f32> -> !torch.float
%1086 = torch.aten.item %1084 : !torch.vtensor<[],si8> -> !torch.int
%1087 = torch.aten._make_per_tensor_quantized_tensor %1082, %1085, %1086 : !torch.vtensor<[1,?,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%1088 = torch.aten.dequantize.self %1087 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],f32>
%1089 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1090 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_295 = torch.constant.int 12
%1091 = torch.aten.item %1089 : !torch.vtensor<[],f32> -> !torch.float
%1092 = torch.aten.item %1090 : !torch.vtensor<[],si8> -> !torch.int
%1093 = torch.aten.quantize_per_tensor %24, %1091, %1092, %int12_295 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1094 = torch.aten.int_repr %1093 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%1095 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1096 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1097 = torch.aten.item %1095 : !torch.vtensor<[],f32> -> !torch.float
%1098 = torch.aten.item %1096 : !torch.vtensor<[],si8> -> !torch.int
%1099 = torch.aten._make_per_tensor_quantized_tensor %1094, %1097, %1098 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1100 = torch.aten.dequantize.self %1099 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%1101 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1102 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_296 = torch.constant.int 12
%1103 = torch.aten.item %1101 : !torch.vtensor<[],f32> -> !torch.float
%1104 = torch.aten.item %1102 : !torch.vtensor<[],si8> -> !torch.int
%1105 = torch.aten.quantize_per_tensor %25, %1103, %1104, %int12_296 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1106 = torch.aten.int_repr %1105 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1107 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1108 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1109 = torch.aten.item %1107 : !torch.vtensor<[],f32> -> !torch.float
%1110 = torch.aten.item %1108 : !torch.vtensor<[],si8> -> !torch.int
%1111 = torch.aten._make_per_tensor_quantized_tensor %1106, %1109, %1110 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1112 = torch.aten.dequantize.self %1111 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_297 = torch.constant.int 1
%int1_298 = torch.constant.int 1
%int1_299 = torch.constant.int 1
%int1_300 = torch.constant.int 1
%int1_301 = torch.constant.int 1
%int1_302 = torch.constant.int 1
%int0_303 = torch.constant.int 0
%1113 = torch.prim.ListConstruct %int1_297, %int1_298 : (!torch.int, !torch.int) -> !torch.list<int>
%1114 = torch.prim.ListConstruct %int1_299, %int1_300 : (!torch.int, !torch.int) -> !torch.list<int>
%1115 = torch.prim.ListConstruct %int1_301, %int1_302 : (!torch.int, !torch.int) -> !torch.list<int>
%1116 = torch.prim.ListConstruct %int0_303, %int0_303 : (!torch.int, !torch.int) -> !torch.list<int>
%false_304 = torch.constant.bool false
%int1_305 = torch.constant.int 1
%1117 = torch.aten.convolution %1088, %1100, %1112, %1115, %1113, %1114, %false_304, %1116, %int1_305 : !torch.vtensor<[1,?,160,160],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,160,160],f32>
%1118 = torch.aten.relu %1117 : !torch.vtensor<[1,32,160,160],f32> -> !torch.vtensor<[1,32,160,160],f32>
%1119 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_306 = torch.constant.int 12
%1121 = torch.aten.item %1119 : !torch.vtensor<[],f32> -> !torch.float
%1122 = torch.aten.item %1120 : !torch.vtensor<[],si8> -> !torch.int
%1123 = torch.aten.quantize_per_tensor %1118, %1121, %1122, %int12_306 : !torch.vtensor<[1,32,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%1124 = torch.aten.int_repr %1123 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],si8>
%1125 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1127 = torch.aten.item %1125 : !torch.vtensor<[],f32> -> !torch.float
%1128 = torch.aten.item %1126 : !torch.vtensor<[],si8> -> !torch.int
%1129 = torch.aten._make_per_tensor_quantized_tensor %1124, %1127, %1128 : !torch.vtensor<[1,32,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%1130 = torch.aten.dequantize.self %1129 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],f32>
%1131 = torch.vtensor.literal(dense<320> : tensor<si64>) : !torch.vtensor<[],si64>
%1132 = torch.vtensor.literal(dense<320> : tensor<si64>) : !torch.vtensor<[],si64>
%1133 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_307 = torch.constant.int 0
%int0_308 = torch.constant.int 0
%int0_309 = torch.constant.int 0
%1134 = torch.aten.select.int %1133, %int0_307, %int0_309 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1135 = torch.aten.item %1134 : !torch.vtensor<[1],si64> -> !torch.int
%1136 = torch.aten.lt.int %1135, %int0_307 : !torch.int, !torch.int -> !torch.bool
%1137 = torch.aten.Int.bool %1136 : !torch.bool -> !torch.int
%1138 = torch.aten.mul.int %1137, %int0_308 : !torch.int, !torch.int -> !torch.int
%1139 = torch.aten.add.int %1135, %1138 : !torch.int, !torch.int -> !torch.int
%1140 = torch.prim.ListConstruct %1139 : (!torch.int) -> !torch.list<int>
%false_310 = torch.constant.bool false
%none_311 = torch.constant.none
%1141 = torch.aten.tensor %1140, %none_311, %none_311, %false_310 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_312, %indices_313 = torch.aten.sort %1141, %int0_307, %false_310 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_314 = torch.constant.int 0
%1142 = torch.aten.select.int %values_312, %int0_307, %int0_314 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1143 = torch.aten.item %1142 : !torch.vtensor<[1],si64> -> !torch.int
%1144 = torch.aten.unsqueeze %1131, %1143 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1145 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_315 = torch.constant.int 0
%int0_316 = torch.constant.int 0
%int0_317 = torch.constant.int 0
%1146 = torch.aten.select.int %1145, %int0_315, %int0_317 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1147 = torch.aten.item %1146 : !torch.vtensor<[1],si64> -> !torch.int
%1148 = torch.aten.lt.int %1147, %int0_315 : !torch.int, !torch.int -> !torch.bool
%1149 = torch.aten.Int.bool %1148 : !torch.bool -> !torch.int
%1150 = torch.aten.mul.int %1149, %int0_316 : !torch.int, !torch.int -> !torch.int
%1151 = torch.aten.add.int %1147, %1150 : !torch.int, !torch.int -> !torch.int
%1152 = torch.prim.ListConstruct %1151 : (!torch.int) -> !torch.list<int>
%false_318 = torch.constant.bool false
%none_319 = torch.constant.none
%1153 = torch.aten.tensor %1152, %none_319, %none_319, %false_318 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_320, %indices_321 = torch.aten.sort %1153, %int0_315, %false_318 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_322 = torch.constant.int 0
%1154 = torch.aten.select.int %values_320, %int0_315, %int0_322 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1155 = torch.aten.item %1154 : !torch.vtensor<[1],si64> -> !torch.int
%1156 = torch.aten.unsqueeze %1132, %1155 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1157 = torch.prim.ListConstruct %1144, %1156 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_323 = torch.constant.int 0
%1158 = torch.aten.cat %1157, %int0_323 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%1159 = torch.aten._shape_as_tensor %1130 : !torch.vtensor<[1,32,160,160],f32> -> !torch.vtensor<[4],si64>
%1160 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1161 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1162 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_324 = torch.constant.none
%int1_325 = torch.constant.int 1
%1163 = torch.prim.ListConstruct %int1_325 : (!torch.int) -> !torch.list<int>
%1164 = torch.aten.ones %1163, %none_324, %none_324, %none_324, %none_324 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_326 = torch.constant.int 0
%int0_327 = torch.constant.int 0
%1165 = torch.prim.NumToTensor.Scalar %int0_327 : !torch.int -> !torch.vtensor<[1],si64>
%1166 = torch.aten.index_select %1161, %int0_326, %1165 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1167 = torch.aten.item %1166 : !torch.vtensor<[1],si64> -> !torch.int
%1168 = torch.aten.index_select %1162, %int0_326, %1165 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1169 = torch.aten.item %1168 : !torch.vtensor<[1],si64> -> !torch.int
%1170 = torch.aten.index_select %1160, %int0_326, %1165 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1171 = torch.aten.item %1170 : !torch.vtensor<[1],si64> -> !torch.int
%1172 = torch.aten.index_select %1164, %int0_326, %1165 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1173 = torch.aten.item %1172 : !torch.vtensor<[1],si64> -> !torch.int
%1174 = torch.aten.slice.Tensor %1159, %1171, %1167, %1169, %1173 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_328 = torch.constant.int 4
%none_329 = torch.constant.none
%false_330 = torch.constant.bool false
%1175 = torch.aten.to.dtype %1158, %int4_328, %false_330, %false_330, %none_329 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%1176 = torch.prim.ListConstruct %1174, %1175 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_331 = torch.constant.int 0
%1177 = torch.aten.cat %1176, %int0_331 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%1178 = torch.operator "onnx.Resize"(%1130, %none, %none, %1177) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,160,160],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%1179 = torch.prim.ListConstruct %1178, %333 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,320,320],f32>) -> !torch.list<vtensor>
%int1_332 = torch.constant.int 1
%1180 = torch.aten.cat %1179, %int1_332 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,320,320],f32>
%1181 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_333 = torch.constant.int 12
%1183 = torch.aten.item %1181 : !torch.vtensor<[],f32> -> !torch.float
%1184 = torch.aten.item %1182 : !torch.vtensor<[],si8> -> !torch.int
%1185 = torch.aten.quantize_per_tensor %1180, %1183, %1184, %int12_333 : !torch.vtensor<[1,?,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,320,320],!torch.qint8>
%1186 = torch.aten.int_repr %1185 : !torch.vtensor<[1,?,320,320],!torch.qint8> -> !torch.vtensor<[1,?,320,320],si8>
%1187 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1188 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1189 = torch.aten.item %1187 : !torch.vtensor<[],f32> -> !torch.float
%1190 = torch.aten.item %1188 : !torch.vtensor<[],si8> -> !torch.int
%1191 = torch.aten._make_per_tensor_quantized_tensor %1186, %1189, %1190 : !torch.vtensor<[1,?,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,320,320],!torch.qint8>
%1192 = torch.aten.dequantize.self %1191 : !torch.vtensor<[1,?,320,320],!torch.qint8> -> !torch.vtensor<[1,?,320,320],f32>
%1193 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_334 = torch.constant.int 12
%1195 = torch.aten.item %1193 : !torch.vtensor<[],f32> -> !torch.float
%1196 = torch.aten.item %1194 : !torch.vtensor<[],si8> -> !torch.int
%1197 = torch.aten.quantize_per_tensor %26, %1195, %1196, %int12_334 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%1198 = torch.aten.int_repr %1197 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%1199 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1201 = torch.aten.item %1199 : !torch.vtensor<[],f32> -> !torch.float
%1202 = torch.aten.item %1200 : !torch.vtensor<[],si8> -> !torch.int
%1203 = torch.aten._make_per_tensor_quantized_tensor %1198, %1201, %1202 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%1204 = torch.aten.dequantize.self %1203 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%1205 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_335 = torch.constant.int 12
%1207 = torch.aten.item %1205 : !torch.vtensor<[],f32> -> !torch.float
%1208 = torch.aten.item %1206 : !torch.vtensor<[],si8> -> !torch.int
%1209 = torch.aten.quantize_per_tensor %27, %1207, %1208, %int12_335 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%1210 = torch.aten.int_repr %1209 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%1211 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1213 = torch.aten.item %1211 : !torch.vtensor<[],f32> -> !torch.float
%1214 = torch.aten.item %1212 : !torch.vtensor<[],si8> -> !torch.int
%1215 = torch.aten._make_per_tensor_quantized_tensor %1210, %1213, %1214 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%1216 = torch.aten.dequantize.self %1215 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_336 = torch.constant.int 1
%int1_337 = torch.constant.int 1
%int1_338 = torch.constant.int 1
%int1_339 = torch.constant.int 1
%int1_340 = torch.constant.int 1
%int1_341 = torch.constant.int 1
%int0_342 = torch.constant.int 0
%1217 = torch.prim.ListConstruct %int1_336, %int1_337 : (!torch.int, !torch.int) -> !torch.list<int>
%1218 = torch.prim.ListConstruct %int1_338, %int1_339 : (!torch.int, !torch.int) -> !torch.list<int>
%1219 = torch.prim.ListConstruct %int1_340, %int1_341 : (!torch.int, !torch.int) -> !torch.list<int>
%1220 = torch.prim.ListConstruct %int0_342, %int0_342 : (!torch.int, !torch.int) -> !torch.list<int>
%false_343 = torch.constant.bool false
%int1_344 = torch.constant.int 1
%1221 = torch.aten.convolution %1192, %1204, %1216, %1219, %1217, %1218, %false_343, %1220, %int1_344 : !torch.vtensor<[1,?,320,320],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,320,320],f32>
%1222 = torch.aten.relu %1221 : !torch.vtensor<[1,64,320,320],f32> -> !torch.vtensor<[1,64,320,320],f32>
%1223 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1224 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_345 = torch.constant.int 12
%1225 = torch.aten.item %1223 : !torch.vtensor<[],f32> -> !torch.float
%1226 = torch.aten.item %1224 : !torch.vtensor<[],si8> -> !torch.int
%1227 = torch.aten.quantize_per_tensor %1222, %1225, %1226, %int12_345 : !torch.vtensor<[1,64,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%1228 = torch.aten.int_repr %1227 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],si8>
%1229 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1230 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1231 = torch.aten.item %1229 : !torch.vtensor<[],f32> -> !torch.float
%1232 = torch.aten.item %1230 : !torch.vtensor<[],si8> -> !torch.int
%1233 = torch.aten._make_per_tensor_quantized_tensor %1228, %1231, %1232 : !torch.vtensor<[1,64,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%1234 = torch.aten.dequantize.self %1233 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],f32>
%int1_346 = torch.constant.int 1
%1235 = torch.aten.add.Tensor %1234, %291, %int1_346 : !torch.vtensor<[1,64,320,320],f32>, !torch.vtensor<[1,64,320,320],f32>, !torch.int -> !torch.vtensor<[1,64,320,320],f32>
%1236 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1237 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_347 = torch.constant.int 12
%1238 = torch.aten.item %1236 : !torch.vtensor<[],f32> -> !torch.float
%1239 = torch.aten.item %1237 : !torch.vtensor<[],si8> -> !torch.int
%1240 = torch.aten.quantize_per_tensor %1235, %1238, %1239, %int12_347 : !torch.vtensor<[1,64,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%1241 = torch.aten.int_repr %1240 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],si8>
%1242 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1243 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1244 = torch.aten.item %1242 : !torch.vtensor<[],f32> -> !torch.float
%1245 = torch.aten.item %1243 : !torch.vtensor<[],si8> -> !torch.int
%1246 = torch.aten._make_per_tensor_quantized_tensor %1241, %1244, %1245 : !torch.vtensor<[1,64,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%1247 = torch.aten.dequantize.self %1246 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],f32>
%int2_348 = torch.constant.int 2
%int2_349 = torch.constant.int 2
%1248 = torch.prim.ListConstruct %int2_348, %int2_349 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_350 = torch.constant.int 0
%int0_351 = torch.constant.int 0
%1249 = torch.prim.ListConstruct %int0_350, %int0_351 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_352 = torch.constant.int 2
%int2_353 = torch.constant.int 2
%1250 = torch.prim.ListConstruct %int2_352, %int2_353 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_354 = torch.constant.int 1
%int1_355 = torch.constant.int 1
%1251 = torch.prim.ListConstruct %int1_354, %int1_355 : (!torch.int, !torch.int) -> !torch.list<int>
%true_356 = torch.constant.bool true
%1252 = torch.aten.max_pool2d %1247, %1248, %1250, %1249, %1251, %true_356 : !torch.vtensor<[1,64,320,320],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,160,160],f32>
%1253 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1254 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_357 = torch.constant.int 12
%1255 = torch.aten.item %1253 : !torch.vtensor<[],f32> -> !torch.float
%1256 = torch.aten.item %1254 : !torch.vtensor<[],si8> -> !torch.int
%1257 = torch.aten.quantize_per_tensor %1252, %1255, %1256, %int12_357 : !torch.vtensor<[1,64,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%1258 = torch.aten.int_repr %1257 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],si8>
%1259 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1260 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1261 = torch.aten.item %1259 : !torch.vtensor<[],f32> -> !torch.float
%1262 = torch.aten.item %1260 : !torch.vtensor<[],si8> -> !torch.int
%1263 = torch.aten._make_per_tensor_quantized_tensor %1258, %1261, %1262 : !torch.vtensor<[1,64,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%1264 = torch.aten.dequantize.self %1263 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],f32>
%1265 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1266 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_358 = torch.constant.int 12
%1267 = torch.aten.item %1265 : !torch.vtensor<[],f32> -> !torch.float
%1268 = torch.aten.item %1266 : !torch.vtensor<[],si8> -> !torch.int
%1269 = torch.aten.quantize_per_tensor %28, %1267, %1268, %int12_358 : !torch.vtensor<[128,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%1270 = torch.aten.int_repr %1269 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],si8>
%1271 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1272 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1273 = torch.aten.item %1271 : !torch.vtensor<[],f32> -> !torch.float
%1274 = torch.aten.item %1272 : !torch.vtensor<[],si8> -> !torch.int
%1275 = torch.aten._make_per_tensor_quantized_tensor %1270, %1273, %1274 : !torch.vtensor<[128,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%1276 = torch.aten.dequantize.self %1275 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],f32>
%1277 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_359 = torch.constant.int 12
%1279 = torch.aten.item %1277 : !torch.vtensor<[],f32> -> !torch.float
%1280 = torch.aten.item %1278 : !torch.vtensor<[],si8> -> !torch.int
%1281 = torch.aten.quantize_per_tensor %29, %1279, %1280, %int12_359 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1282 = torch.aten.int_repr %1281 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1283 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1285 = torch.aten.item %1283 : !torch.vtensor<[],f32> -> !torch.float
%1286 = torch.aten.item %1284 : !torch.vtensor<[],si8> -> !torch.int
%1287 = torch.aten._make_per_tensor_quantized_tensor %1282, %1285, %1286 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1288 = torch.aten.dequantize.self %1287 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_360 = torch.constant.int 1
%int1_361 = torch.constant.int 1
%int1_362 = torch.constant.int 1
%int1_363 = torch.constant.int 1
%int1_364 = torch.constant.int 1
%int1_365 = torch.constant.int 1
%int0_366 = torch.constant.int 0
%1289 = torch.prim.ListConstruct %int1_360, %int1_361 : (!torch.int, !torch.int) -> !torch.list<int>
%1290 = torch.prim.ListConstruct %int1_362, %int1_363 : (!torch.int, !torch.int) -> !torch.list<int>
%1291 = torch.prim.ListConstruct %int1_364, %int1_365 : (!torch.int, !torch.int) -> !torch.list<int>
%1292 = torch.prim.ListConstruct %int0_366, %int0_366 : (!torch.int, !torch.int) -> !torch.list<int>
%false_367 = torch.constant.bool false
%int1_368 = torch.constant.int 1
%1293 = torch.aten.convolution %1264, %1276, %1288, %1291, %1289, %1290, %false_367, %1292, %int1_368 : !torch.vtensor<[1,64,160,160],f32>, !torch.vtensor<[128,64,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,160,160],f32>
%1294 = torch.aten.relu %1293 : !torch.vtensor<[1,128,160,160],f32> -> !torch.vtensor<[1,128,160,160],f32>
%1295 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1296 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_369 = torch.constant.int 12
%1297 = torch.aten.item %1295 : !torch.vtensor<[],f32> -> !torch.float
%1298 = torch.aten.item %1296 : !torch.vtensor<[],si8> -> !torch.int
%1299 = torch.aten.quantize_per_tensor %1294, %1297, %1298, %int12_369 : !torch.vtensor<[1,128,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%1300 = torch.aten.int_repr %1299 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],si8>
%1301 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1302 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1303 = torch.aten.item %1301 : !torch.vtensor<[],f32> -> !torch.float
%1304 = torch.aten.item %1302 : !torch.vtensor<[],si8> -> !torch.int
%1305 = torch.aten._make_per_tensor_quantized_tensor %1300, %1303, %1304 : !torch.vtensor<[1,128,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%1306 = torch.aten.dequantize.self %1305 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],f32>
%1307 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1308 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_370 = torch.constant.int 12
%1309 = torch.aten.item %1307 : !torch.vtensor<[],f32> -> !torch.float
%1310 = torch.aten.item %1308 : !torch.vtensor<[],si8> -> !torch.int
%1311 = torch.aten.quantize_per_tensor %30, %1309, %1310, %int12_370 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8>
%1312 = torch.aten.int_repr %1311 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8>
%1313 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1314 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1315 = torch.aten.item %1313 : !torch.vtensor<[],f32> -> !torch.float
%1316 = torch.aten.item %1314 : !torch.vtensor<[],si8> -> !torch.int
%1317 = torch.aten._make_per_tensor_quantized_tensor %1312, %1315, %1316 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8>
%1318 = torch.aten.dequantize.self %1317 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32>
%1319 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1320 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_371 = torch.constant.int 12
%1321 = torch.aten.item %1319 : !torch.vtensor<[],f32> -> !torch.float
%1322 = torch.aten.item %1320 : !torch.vtensor<[],si8> -> !torch.int
%1323 = torch.aten.quantize_per_tensor %31, %1321, %1322, %int12_371 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1324 = torch.aten.int_repr %1323 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1325 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1326 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1327 = torch.aten.item %1325 : !torch.vtensor<[],f32> -> !torch.float
%1328 = torch.aten.item %1326 : !torch.vtensor<[],si8> -> !torch.int
%1329 = torch.aten._make_per_tensor_quantized_tensor %1324, %1327, %1328 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1330 = torch.aten.dequantize.self %1329 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_372 = torch.constant.int 1
%int1_373 = torch.constant.int 1
%int1_374 = torch.constant.int 1
%int1_375 = torch.constant.int 1
%int1_376 = torch.constant.int 1
%int1_377 = torch.constant.int 1
%int0_378 = torch.constant.int 0
%1331 = torch.prim.ListConstruct %int1_372, %int1_373 : (!torch.int, !torch.int) -> !torch.list<int>
%1332 = torch.prim.ListConstruct %int1_374, %int1_375 : (!torch.int, !torch.int) -> !torch.list<int>
%1333 = torch.prim.ListConstruct %int1_376, %int1_377 : (!torch.int, !torch.int) -> !torch.list<int>
%1334 = torch.prim.ListConstruct %int0_378, %int0_378 : (!torch.int, !torch.int) -> !torch.list<int>
%false_379 = torch.constant.bool false
%int1_380 = torch.constant.int 1
%1335 = torch.aten.convolution %1306, %1318, %1330, %1333, %1331, %1332, %false_379, %1334, %int1_380 : !torch.vtensor<[1,128,160,160],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,160,160],f32>
%1336 = torch.aten.relu %1335 : !torch.vtensor<[1,32,160,160],f32> -> !torch.vtensor<[1,32,160,160],f32>
%1337 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1338 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_381 = torch.constant.int 12
%1339 = torch.aten.item %1337 : !torch.vtensor<[],f32> -> !torch.float
%1340 = torch.aten.item %1338 : !torch.vtensor<[],si8> -> !torch.int
%1341 = torch.aten.quantize_per_tensor %1336, %1339, %1340, %int12_381 : !torch.vtensor<[1,32,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%1342 = torch.aten.int_repr %1341 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],si8>
%1343 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1344 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1345 = torch.aten.item %1343 : !torch.vtensor<[],f32> -> !torch.float
%1346 = torch.aten.item %1344 : !torch.vtensor<[],si8> -> !torch.int
%1347 = torch.aten._make_per_tensor_quantized_tensor %1342, %1345, %1346 : !torch.vtensor<[1,32,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%1348 = torch.aten.dequantize.self %1347 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],f32>
%int2_382 = torch.constant.int 2
%int2_383 = torch.constant.int 2
%1349 = torch.prim.ListConstruct %int2_382, %int2_383 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_384 = torch.constant.int 0
%int0_385 = torch.constant.int 0
%1350 = torch.prim.ListConstruct %int0_384, %int0_385 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_386 = torch.constant.int 2
%int2_387 = torch.constant.int 2
%1351 = torch.prim.ListConstruct %int2_386, %int2_387 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_388 = torch.constant.int 1
%int1_389 = torch.constant.int 1
%1352 = torch.prim.ListConstruct %int1_388, %int1_389 : (!torch.int, !torch.int) -> !torch.list<int>
%true_390 = torch.constant.bool true
%1353 = torch.aten.max_pool2d %1348, %1349, %1351, %1350, %1352, %true_390 : !torch.vtensor<[1,32,160,160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,80,80],f32>
%1354 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1355 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_391 = torch.constant.int 12
%1356 = torch.aten.item %1354 : !torch.vtensor<[],f32> -> !torch.float
%1357 = torch.aten.item %1355 : !torch.vtensor<[],si8> -> !torch.int
%1358 = torch.aten.quantize_per_tensor %1353, %1356, %1357, %int12_391 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1359 = torch.aten.int_repr %1358 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%1360 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1361 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1362 = torch.aten.item %1360 : !torch.vtensor<[],f32> -> !torch.float
%1363 = torch.aten.item %1361 : !torch.vtensor<[],si8> -> !torch.int
%1364 = torch.aten._make_per_tensor_quantized_tensor %1359, %1362, %1363 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1365 = torch.aten.dequantize.self %1364 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%1366 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1367 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_392 = torch.constant.int 12
%1368 = torch.aten.item %1366 : !torch.vtensor<[],f32> -> !torch.float
%1369 = torch.aten.item %1367 : !torch.vtensor<[],si8> -> !torch.int
%1370 = torch.aten.quantize_per_tensor %32, %1368, %1369, %int12_392 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1371 = torch.aten.int_repr %1370 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%1372 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1374 = torch.aten.item %1372 : !torch.vtensor<[],f32> -> !torch.float
%1375 = torch.aten.item %1373 : !torch.vtensor<[],si8> -> !torch.int
%1376 = torch.aten._make_per_tensor_quantized_tensor %1371, %1374, %1375 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1377 = torch.aten.dequantize.self %1376 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%1378 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_393 = torch.constant.int 12
%1380 = torch.aten.item %1378 : !torch.vtensor<[],f32> -> !torch.float
%1381 = torch.aten.item %1379 : !torch.vtensor<[],si8> -> !torch.int
%1382 = torch.aten.quantize_per_tensor %33, %1380, %1381, %int12_393 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1383 = torch.aten.int_repr %1382 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1384 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1385 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1386 = torch.aten.item %1384 : !torch.vtensor<[],f32> -> !torch.float
%1387 = torch.aten.item %1385 : !torch.vtensor<[],si8> -> !torch.int
%1388 = torch.aten._make_per_tensor_quantized_tensor %1383, %1386, %1387 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1389 = torch.aten.dequantize.self %1388 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_394 = torch.constant.int 1
%int1_395 = torch.constant.int 1
%int1_396 = torch.constant.int 1
%int1_397 = torch.constant.int 1
%int1_398 = torch.constant.int 1
%int1_399 = torch.constant.int 1
%int0_400 = torch.constant.int 0
%1390 = torch.prim.ListConstruct %int1_394, %int1_395 : (!torch.int, !torch.int) -> !torch.list<int>
%1391 = torch.prim.ListConstruct %int1_396, %int1_397 : (!torch.int, !torch.int) -> !torch.list<int>
%1392 = torch.prim.ListConstruct %int1_398, %int1_399 : (!torch.int, !torch.int) -> !torch.list<int>
%1393 = torch.prim.ListConstruct %int0_400, %int0_400 : (!torch.int, !torch.int) -> !torch.list<int>
%false_401 = torch.constant.bool false
%int1_402 = torch.constant.int 1
%1394 = torch.aten.convolution %1365, %1377, %1389, %1392, %1390, %1391, %false_401, %1393, %int1_402 : !torch.vtensor<[1,32,80,80],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%1395 = torch.aten.relu %1394 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%1396 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1397 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_403 = torch.constant.int 12
%1398 = torch.aten.item %1396 : !torch.vtensor<[],f32> -> !torch.float
%1399 = torch.aten.item %1397 : !torch.vtensor<[],si8> -> !torch.int
%1400 = torch.aten.quantize_per_tensor %1395, %1398, %1399, %int12_403 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1401 = torch.aten.int_repr %1400 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%1402 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1404 = torch.aten.item %1402 : !torch.vtensor<[],f32> -> !torch.float
%1405 = torch.aten.item %1403 : !torch.vtensor<[],si8> -> !torch.int
%1406 = torch.aten._make_per_tensor_quantized_tensor %1401, %1404, %1405 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1407 = torch.aten.dequantize.self %1406 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%int2_404 = torch.constant.int 2
%int2_405 = torch.constant.int 2
%1408 = torch.prim.ListConstruct %int2_404, %int2_405 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_406 = torch.constant.int 0
%int0_407 = torch.constant.int 0
%1409 = torch.prim.ListConstruct %int0_406, %int0_407 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_408 = torch.constant.int 2
%int2_409 = torch.constant.int 2
%1410 = torch.prim.ListConstruct %int2_408, %int2_409 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_410 = torch.constant.int 1
%int1_411 = torch.constant.int 1
%1411 = torch.prim.ListConstruct %int1_410, %int1_411 : (!torch.int, !torch.int) -> !torch.list<int>
%true_412 = torch.constant.bool true
%1412 = torch.aten.max_pool2d %1407, %1408, %1410, %1409, %1411, %true_412 : !torch.vtensor<[1,32,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,40,40],f32>
%1413 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1414 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_413 = torch.constant.int 12
%1415 = torch.aten.item %1413 : !torch.vtensor<[],f32> -> !torch.float
%1416 = torch.aten.item %1414 : !torch.vtensor<[],si8> -> !torch.int
%1417 = torch.aten.quantize_per_tensor %1412, %1415, %1416, %int12_413 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1418 = torch.aten.int_repr %1417 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%1419 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1420 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1421 = torch.aten.item %1419 : !torch.vtensor<[],f32> -> !torch.float
%1422 = torch.aten.item %1420 : !torch.vtensor<[],si8> -> !torch.int
%1423 = torch.aten._make_per_tensor_quantized_tensor %1418, %1421, %1422 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1424 = torch.aten.dequantize.self %1423 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%1425 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1426 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_414 = torch.constant.int 12
%1427 = torch.aten.item %1425 : !torch.vtensor<[],f32> -> !torch.float
%1428 = torch.aten.item %1426 : !torch.vtensor<[],si8> -> !torch.int
%1429 = torch.aten.quantize_per_tensor %34, %1427, %1428, %int12_414 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1430 = torch.aten.int_repr %1429 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%1431 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1432 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1433 = torch.aten.item %1431 : !torch.vtensor<[],f32> -> !torch.float
%1434 = torch.aten.item %1432 : !torch.vtensor<[],si8> -> !torch.int
%1435 = torch.aten._make_per_tensor_quantized_tensor %1430, %1433, %1434 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1436 = torch.aten.dequantize.self %1435 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%1437 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1438 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_415 = torch.constant.int 12
%1439 = torch.aten.item %1437 : !torch.vtensor<[],f32> -> !torch.float
%1440 = torch.aten.item %1438 : !torch.vtensor<[],si8> -> !torch.int
%1441 = torch.aten.quantize_per_tensor %35, %1439, %1440, %int12_415 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1442 = torch.aten.int_repr %1441 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1443 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1444 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1445 = torch.aten.item %1443 : !torch.vtensor<[],f32> -> !torch.float
%1446 = torch.aten.item %1444 : !torch.vtensor<[],si8> -> !torch.int
%1447 = torch.aten._make_per_tensor_quantized_tensor %1442, %1445, %1446 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1448 = torch.aten.dequantize.self %1447 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_416 = torch.constant.int 1
%int1_417 = torch.constant.int 1
%int1_418 = torch.constant.int 1
%int1_419 = torch.constant.int 1
%int1_420 = torch.constant.int 1
%int1_421 = torch.constant.int 1
%int0_422 = torch.constant.int 0
%1449 = torch.prim.ListConstruct %int1_416, %int1_417 : (!torch.int, !torch.int) -> !torch.list<int>
%1450 = torch.prim.ListConstruct %int1_418, %int1_419 : (!torch.int, !torch.int) -> !torch.list<int>
%1451 = torch.prim.ListConstruct %int1_420, %int1_421 : (!torch.int, !torch.int) -> !torch.list<int>
%1452 = torch.prim.ListConstruct %int0_422, %int0_422 : (!torch.int, !torch.int) -> !torch.list<int>
%false_423 = torch.constant.bool false
%int1_424 = torch.constant.int 1
%1453 = torch.aten.convolution %1424, %1436, %1448, %1451, %1449, %1450, %false_423, %1452, %int1_424 : !torch.vtensor<[1,32,40,40],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%1454 = torch.aten.relu %1453 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%1455 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1456 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_425 = torch.constant.int 12
%1457 = torch.aten.item %1455 : !torch.vtensor<[],f32> -> !torch.float
%1458 = torch.aten.item %1456 : !torch.vtensor<[],si8> -> !torch.int
%1459 = torch.aten.quantize_per_tensor %1454, %1457, %1458, %int12_425 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1460 = torch.aten.int_repr %1459 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%1461 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1462 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1463 = torch.aten.item %1461 : !torch.vtensor<[],f32> -> !torch.float
%1464 = torch.aten.item %1462 : !torch.vtensor<[],si8> -> !torch.int
%1465 = torch.aten._make_per_tensor_quantized_tensor %1460, %1463, %1464 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1466 = torch.aten.dequantize.self %1465 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%int2_426 = torch.constant.int 2
%int2_427 = torch.constant.int 2
%1467 = torch.prim.ListConstruct %int2_426, %int2_427 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_428 = torch.constant.int 0
%int0_429 = torch.constant.int 0
%1468 = torch.prim.ListConstruct %int0_428, %int0_429 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_430 = torch.constant.int 2
%int2_431 = torch.constant.int 2
%1469 = torch.prim.ListConstruct %int2_430, %int2_431 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_432 = torch.constant.int 1
%int1_433 = torch.constant.int 1
%1470 = torch.prim.ListConstruct %int1_432, %int1_433 : (!torch.int, !torch.int) -> !torch.list<int>
%true_434 = torch.constant.bool true
%1471 = torch.aten.max_pool2d %1466, %1467, %1469, %1468, %1470, %true_434 : !torch.vtensor<[1,32,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,20,20],f32>
%1472 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1473 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_435 = torch.constant.int 12
%1474 = torch.aten.item %1472 : !torch.vtensor<[],f32> -> !torch.float
%1475 = torch.aten.item %1473 : !torch.vtensor<[],si8> -> !torch.int
%1476 = torch.aten.quantize_per_tensor %1471, %1474, %1475, %int12_435 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1477 = torch.aten.int_repr %1476 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%1478 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1479 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1480 = torch.aten.item %1478 : !torch.vtensor<[],f32> -> !torch.float
%1481 = torch.aten.item %1479 : !torch.vtensor<[],si8> -> !torch.int
%1482 = torch.aten._make_per_tensor_quantized_tensor %1477, %1480, %1481 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1483 = torch.aten.dequantize.self %1482 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%1484 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1485 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_436 = torch.constant.int 12
%1486 = torch.aten.item %1484 : !torch.vtensor<[],f32> -> !torch.float
%1487 = torch.aten.item %1485 : !torch.vtensor<[],si8> -> !torch.int
%1488 = torch.aten.quantize_per_tensor %36, %1486, %1487, %int12_436 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1489 = torch.aten.int_repr %1488 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%1490 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1491 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1492 = torch.aten.item %1490 : !torch.vtensor<[],f32> -> !torch.float
%1493 = torch.aten.item %1491 : !torch.vtensor<[],si8> -> !torch.int
%1494 = torch.aten._make_per_tensor_quantized_tensor %1489, %1492, %1493 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1495 = torch.aten.dequantize.self %1494 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%1496 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1497 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_437 = torch.constant.int 12
%1498 = torch.aten.item %1496 : !torch.vtensor<[],f32> -> !torch.float
%1499 = torch.aten.item %1497 : !torch.vtensor<[],si8> -> !torch.int
%1500 = torch.aten.quantize_per_tensor %37, %1498, %1499, %int12_437 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1501 = torch.aten.int_repr %1500 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1502 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1503 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1504 = torch.aten.item %1502 : !torch.vtensor<[],f32> -> !torch.float
%1505 = torch.aten.item %1503 : !torch.vtensor<[],si8> -> !torch.int
%1506 = torch.aten._make_per_tensor_quantized_tensor %1501, %1504, %1505 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1507 = torch.aten.dequantize.self %1506 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_438 = torch.constant.int 1
%int1_439 = torch.constant.int 1
%int1_440 = torch.constant.int 1
%int1_441 = torch.constant.int 1
%int1_442 = torch.constant.int 1
%int1_443 = torch.constant.int 1
%int0_444 = torch.constant.int 0
%1508 = torch.prim.ListConstruct %int1_438, %int1_439 : (!torch.int, !torch.int) -> !torch.list<int>
%1509 = torch.prim.ListConstruct %int1_440, %int1_441 : (!torch.int, !torch.int) -> !torch.list<int>
%1510 = torch.prim.ListConstruct %int1_442, %int1_443 : (!torch.int, !torch.int) -> !torch.list<int>
%1511 = torch.prim.ListConstruct %int0_444, %int0_444 : (!torch.int, !torch.int) -> !torch.list<int>
%false_445 = torch.constant.bool false
%int1_446 = torch.constant.int 1
%1512 = torch.aten.convolution %1483, %1495, %1507, %1510, %1508, %1509, %false_445, %1511, %int1_446 : !torch.vtensor<[1,32,20,20],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%1513 = torch.aten.relu %1512 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%1514 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1515 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_447 = torch.constant.int 12
%1516 = torch.aten.item %1514 : !torch.vtensor<[],f32> -> !torch.float
%1517 = torch.aten.item %1515 : !torch.vtensor<[],si8> -> !torch.int
%1518 = torch.aten.quantize_per_tensor %1513, %1516, %1517, %int12_447 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1519 = torch.aten.int_repr %1518 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%1520 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1522 = torch.aten.item %1520 : !torch.vtensor<[],f32> -> !torch.float
%1523 = torch.aten.item %1521 : !torch.vtensor<[],si8> -> !torch.int
%1524 = torch.aten._make_per_tensor_quantized_tensor %1519, %1522, %1523 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1525 = torch.aten.dequantize.self %1524 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%int2_448 = torch.constant.int 2
%int2_449 = torch.constant.int 2
%1526 = torch.prim.ListConstruct %int2_448, %int2_449 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_450 = torch.constant.int 0
%int0_451 = torch.constant.int 0
%1527 = torch.prim.ListConstruct %int0_450, %int0_451 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_452 = torch.constant.int 2
%int2_453 = torch.constant.int 2
%1528 = torch.prim.ListConstruct %int2_452, %int2_453 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_454 = torch.constant.int 1
%int1_455 = torch.constant.int 1
%1529 = torch.prim.ListConstruct %int1_454, %int1_455 : (!torch.int, !torch.int) -> !torch.list<int>
%true_456 = torch.constant.bool true
%1530 = torch.aten.max_pool2d %1525, %1526, %1528, %1527, %1529, %true_456 : !torch.vtensor<[1,32,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,10,10],f32>
%1531 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1532 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_457 = torch.constant.int 12
%1533 = torch.aten.item %1531 : !torch.vtensor<[],f32> -> !torch.float
%1534 = torch.aten.item %1532 : !torch.vtensor<[],si8> -> !torch.int
%1535 = torch.aten.quantize_per_tensor %1530, %1533, %1534, %int12_457 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1536 = torch.aten.int_repr %1535 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%1537 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1538 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1539 = torch.aten.item %1537 : !torch.vtensor<[],f32> -> !torch.float
%1540 = torch.aten.item %1538 : !torch.vtensor<[],si8> -> !torch.int
%1541 = torch.aten._make_per_tensor_quantized_tensor %1536, %1539, %1540 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1542 = torch.aten.dequantize.self %1541 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%1543 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1544 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_458 = torch.constant.int 12
%1545 = torch.aten.item %1543 : !torch.vtensor<[],f32> -> !torch.float
%1546 = torch.aten.item %1544 : !torch.vtensor<[],si8> -> !torch.int
%1547 = torch.aten.quantize_per_tensor %38, %1545, %1546, %int12_458 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1548 = torch.aten.int_repr %1547 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%1549 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1550 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1551 = torch.aten.item %1549 : !torch.vtensor<[],f32> -> !torch.float
%1552 = torch.aten.item %1550 : !torch.vtensor<[],si8> -> !torch.int
%1553 = torch.aten._make_per_tensor_quantized_tensor %1548, %1551, %1552 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1554 = torch.aten.dequantize.self %1553 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%1555 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1556 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_459 = torch.constant.int 12
%1557 = torch.aten.item %1555 : !torch.vtensor<[],f32> -> !torch.float
%1558 = torch.aten.item %1556 : !torch.vtensor<[],si8> -> !torch.int
%1559 = torch.aten.quantize_per_tensor %39, %1557, %1558, %int12_459 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1560 = torch.aten.int_repr %1559 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1561 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1562 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1563 = torch.aten.item %1561 : !torch.vtensor<[],f32> -> !torch.float
%1564 = torch.aten.item %1562 : !torch.vtensor<[],si8> -> !torch.int
%1565 = torch.aten._make_per_tensor_quantized_tensor %1560, %1563, %1564 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1566 = torch.aten.dequantize.self %1565 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_460 = torch.constant.int 1
%int1_461 = torch.constant.int 1
%int1_462 = torch.constant.int 1
%int1_463 = torch.constant.int 1
%int1_464 = torch.constant.int 1
%int1_465 = torch.constant.int 1
%int0_466 = torch.constant.int 0
%1567 = torch.prim.ListConstruct %int1_460, %int1_461 : (!torch.int, !torch.int) -> !torch.list<int>
%1568 = torch.prim.ListConstruct %int1_462, %int1_463 : (!torch.int, !torch.int) -> !torch.list<int>
%1569 = torch.prim.ListConstruct %int1_464, %int1_465 : (!torch.int, !torch.int) -> !torch.list<int>
%1570 = torch.prim.ListConstruct %int0_466, %int0_466 : (!torch.int, !torch.int) -> !torch.list<int>
%false_467 = torch.constant.bool false
%int1_468 = torch.constant.int 1
%1571 = torch.aten.convolution %1542, %1554, %1566, %1569, %1567, %1568, %false_467, %1570, %int1_468 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%1572 = torch.aten.relu %1571 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%1573 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1574 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_469 = torch.constant.int 12
%1575 = torch.aten.item %1573 : !torch.vtensor<[],f32> -> !torch.float
%1576 = torch.aten.item %1574 : !torch.vtensor<[],si8> -> !torch.int
%1577 = torch.aten.quantize_per_tensor %1572, %1575, %1576, %int12_469 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1578 = torch.aten.int_repr %1577 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%1579 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1580 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1581 = torch.aten.item %1579 : !torch.vtensor<[],f32> -> !torch.float
%1582 = torch.aten.item %1580 : !torch.vtensor<[],si8> -> !torch.int
%1583 = torch.aten._make_per_tensor_quantized_tensor %1578, %1581, %1582 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1584 = torch.aten.dequantize.self %1583 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%1585 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1586 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_470 = torch.constant.int 12
%1587 = torch.aten.item %1585 : !torch.vtensor<[],f32> -> !torch.float
%1588 = torch.aten.item %1586 : !torch.vtensor<[],si8> -> !torch.int
%1589 = torch.aten.quantize_per_tensor %40, %1587, %1588, %int12_470 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1590 = torch.aten.int_repr %1589 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%1591 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1592 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1593 = torch.aten.item %1591 : !torch.vtensor<[],f32> -> !torch.float
%1594 = torch.aten.item %1592 : !torch.vtensor<[],si8> -> !torch.int
%1595 = torch.aten._make_per_tensor_quantized_tensor %1590, %1593, %1594 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%1596 = torch.aten.dequantize.self %1595 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%1597 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1598 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_471 = torch.constant.int 12
%1599 = torch.aten.item %1597 : !torch.vtensor<[],f32> -> !torch.float
%1600 = torch.aten.item %1598 : !torch.vtensor<[],si8> -> !torch.int
%1601 = torch.aten.quantize_per_tensor %41, %1599, %1600, %int12_471 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1602 = torch.aten.int_repr %1601 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1603 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1604 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1605 = torch.aten.item %1603 : !torch.vtensor<[],f32> -> !torch.float
%1606 = torch.aten.item %1604 : !torch.vtensor<[],si8> -> !torch.int
%1607 = torch.aten._make_per_tensor_quantized_tensor %1602, %1605, %1606 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1608 = torch.aten.dequantize.self %1607 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int2_472 = torch.constant.int 2
%int2_473 = torch.constant.int 2
%int2_474 = torch.constant.int 2
%int2_475 = torch.constant.int 2
%int1_476 = torch.constant.int 1
%int1_477 = torch.constant.int 1
%int0_478 = torch.constant.int 0
%1609 = torch.prim.ListConstruct %int2_472, %int2_473 : (!torch.int, !torch.int) -> !torch.list<int>
%1610 = torch.prim.ListConstruct %int2_474, %int2_475 : (!torch.int, !torch.int) -> !torch.list<int>
%1611 = torch.prim.ListConstruct %int1_476, %int1_477 : (!torch.int, !torch.int) -> !torch.list<int>
%1612 = torch.prim.ListConstruct %int0_478, %int0_478 : (!torch.int, !torch.int) -> !torch.list<int>
%false_479 = torch.constant.bool false
%int1_480 = torch.constant.int 1
%1613 = torch.aten.convolution %1584, %1596, %1608, %1611, %1609, %1610, %false_479, %1612, %int1_480 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%1614 = torch.aten.relu %1613 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%1615 = torch.prim.ListConstruct %1614, %1584 : (!torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[1,32,10,10],f32>) -> !torch.list<vtensor>
%int1_481 = torch.constant.int 1
%1616 = torch.aten.cat %1615, %int1_481 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%1617 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1618 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_482 = torch.constant.int 12
%1619 = torch.aten.item %1617 : !torch.vtensor<[],f32> -> !torch.float
%1620 = torch.aten.item %1618 : !torch.vtensor<[],si8> -> !torch.int
%1621 = torch.aten.quantize_per_tensor %1616, %1619, %1620, %int12_482 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%1622 = torch.aten.int_repr %1621 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%1623 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1624 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1625 = torch.aten.item %1623 : !torch.vtensor<[],f32> -> !torch.float
%1626 = torch.aten.item %1624 : !torch.vtensor<[],si8> -> !torch.int
%1627 = torch.aten._make_per_tensor_quantized_tensor %1622, %1625, %1626 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%1628 = torch.aten.dequantize.self %1627 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%1629 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1630 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_483 = torch.constant.int 12
%1631 = torch.aten.item %1629 : !torch.vtensor<[],f32> -> !torch.float
%1632 = torch.aten.item %1630 : !torch.vtensor<[],si8> -> !torch.int
%1633 = torch.aten.quantize_per_tensor %42, %1631, %1632, %int12_483 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1634 = torch.aten.int_repr %1633 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%1635 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1636 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1637 = torch.aten.item %1635 : !torch.vtensor<[],f32> -> !torch.float
%1638 = torch.aten.item %1636 : !torch.vtensor<[],si8> -> !torch.int
%1639 = torch.aten._make_per_tensor_quantized_tensor %1634, %1637, %1638 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1640 = torch.aten.dequantize.self %1639 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%1641 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1642 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_484 = torch.constant.int 12
%1643 = torch.aten.item %1641 : !torch.vtensor<[],f32> -> !torch.float
%1644 = torch.aten.item %1642 : !torch.vtensor<[],si8> -> !torch.int
%1645 = torch.aten.quantize_per_tensor %43, %1643, %1644, %int12_484 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1646 = torch.aten.int_repr %1645 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1647 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1648 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1649 = torch.aten.item %1647 : !torch.vtensor<[],f32> -> !torch.float
%1650 = torch.aten.item %1648 : !torch.vtensor<[],si8> -> !torch.int
%1651 = torch.aten._make_per_tensor_quantized_tensor %1646, %1649, %1650 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1652 = torch.aten.dequantize.self %1651 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_485 = torch.constant.int 1
%int1_486 = torch.constant.int 1
%int1_487 = torch.constant.int 1
%int1_488 = torch.constant.int 1
%int1_489 = torch.constant.int 1
%int1_490 = torch.constant.int 1
%int0_491 = torch.constant.int 0
%1653 = torch.prim.ListConstruct %int1_485, %int1_486 : (!torch.int, !torch.int) -> !torch.list<int>
%1654 = torch.prim.ListConstruct %int1_487, %int1_488 : (!torch.int, !torch.int) -> !torch.list<int>
%1655 = torch.prim.ListConstruct %int1_489, %int1_490 : (!torch.int, !torch.int) -> !torch.list<int>
%1656 = torch.prim.ListConstruct %int0_491, %int0_491 : (!torch.int, !torch.int) -> !torch.list<int>
%false_492 = torch.constant.bool false
%int1_493 = torch.constant.int 1
%1657 = torch.aten.convolution %1628, %1640, %1652, %1655, %1653, %1654, %false_492, %1656, %int1_493 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%1658 = torch.aten.relu %1657 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%1659 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1660 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_494 = torch.constant.int 12
%1661 = torch.aten.item %1659 : !torch.vtensor<[],f32> -> !torch.float
%1662 = torch.aten.item %1660 : !torch.vtensor<[],si8> -> !torch.int
%1663 = torch.aten.quantize_per_tensor %1658, %1661, %1662, %int12_494 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1664 = torch.aten.int_repr %1663 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%1665 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1666 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1667 = torch.aten.item %1665 : !torch.vtensor<[],f32> -> !torch.float
%1668 = torch.aten.item %1666 : !torch.vtensor<[],si8> -> !torch.int
%1669 = torch.aten._make_per_tensor_quantized_tensor %1664, %1667, %1668 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%1670 = torch.aten.dequantize.self %1669 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%1671 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%1672 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%1673 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_495 = torch.constant.int 0
%int0_496 = torch.constant.int 0
%int0_497 = torch.constant.int 0
%1674 = torch.aten.select.int %1673, %int0_495, %int0_497 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1675 = torch.aten.item %1674 : !torch.vtensor<[1],si64> -> !torch.int
%1676 = torch.aten.lt.int %1675, %int0_495 : !torch.int, !torch.int -> !torch.bool
%1677 = torch.aten.Int.bool %1676 : !torch.bool -> !torch.int
%1678 = torch.aten.mul.int %1677, %int0_496 : !torch.int, !torch.int -> !torch.int
%1679 = torch.aten.add.int %1675, %1678 : !torch.int, !torch.int -> !torch.int
%1680 = torch.prim.ListConstruct %1679 : (!torch.int) -> !torch.list<int>
%false_498 = torch.constant.bool false
%none_499 = torch.constant.none
%1681 = torch.aten.tensor %1680, %none_499, %none_499, %false_498 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_500, %indices_501 = torch.aten.sort %1681, %int0_495, %false_498 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_502 = torch.constant.int 0
%1682 = torch.aten.select.int %values_500, %int0_495, %int0_502 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1683 = torch.aten.item %1682 : !torch.vtensor<[1],si64> -> !torch.int
%1684 = torch.aten.unsqueeze %1671, %1683 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1685 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_503 = torch.constant.int 0
%int0_504 = torch.constant.int 0
%int0_505 = torch.constant.int 0
%1686 = torch.aten.select.int %1685, %int0_503, %int0_505 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1687 = torch.aten.item %1686 : !torch.vtensor<[1],si64> -> !torch.int
%1688 = torch.aten.lt.int %1687, %int0_503 : !torch.int, !torch.int -> !torch.bool
%1689 = torch.aten.Int.bool %1688 : !torch.bool -> !torch.int
%1690 = torch.aten.mul.int %1689, %int0_504 : !torch.int, !torch.int -> !torch.int
%1691 = torch.aten.add.int %1687, %1690 : !torch.int, !torch.int -> !torch.int
%1692 = torch.prim.ListConstruct %1691 : (!torch.int) -> !torch.list<int>
%false_506 = torch.constant.bool false
%none_507 = torch.constant.none
%1693 = torch.aten.tensor %1692, %none_507, %none_507, %false_506 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_508, %indices_509 = torch.aten.sort %1693, %int0_503, %false_506 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_510 = torch.constant.int 0
%1694 = torch.aten.select.int %values_508, %int0_503, %int0_510 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1695 = torch.aten.item %1694 : !torch.vtensor<[1],si64> -> !torch.int
%1696 = torch.aten.unsqueeze %1672, %1695 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1697 = torch.prim.ListConstruct %1684, %1696 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_511 = torch.constant.int 0
%1698 = torch.aten.cat %1697, %int0_511 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%1699 = torch.aten._shape_as_tensor %1670 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[4],si64>
%1700 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1701 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1702 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_512 = torch.constant.none
%int1_513 = torch.constant.int 1
%1703 = torch.prim.ListConstruct %int1_513 : (!torch.int) -> !torch.list<int>
%1704 = torch.aten.ones %1703, %none_512, %none_512, %none_512, %none_512 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_514 = torch.constant.int 0
%int0_515 = torch.constant.int 0
%1705 = torch.prim.NumToTensor.Scalar %int0_515 : !torch.int -> !torch.vtensor<[1],si64>
%1706 = torch.aten.index_select %1701, %int0_514, %1705 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1707 = torch.aten.item %1706 : !torch.vtensor<[1],si64> -> !torch.int
%1708 = torch.aten.index_select %1702, %int0_514, %1705 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1709 = torch.aten.item %1708 : !torch.vtensor<[1],si64> -> !torch.int
%1710 = torch.aten.index_select %1700, %int0_514, %1705 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1711 = torch.aten.item %1710 : !torch.vtensor<[1],si64> -> !torch.int
%1712 = torch.aten.index_select %1704, %int0_514, %1705 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1713 = torch.aten.item %1712 : !torch.vtensor<[1],si64> -> !torch.int
%1714 = torch.aten.slice.Tensor %1699, %1711, %1707, %1709, %1713 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_516 = torch.constant.int 4
%none_517 = torch.constant.none
%false_518 = torch.constant.bool false
%1715 = torch.aten.to.dtype %1698, %int4_516, %false_518, %false_518, %none_517 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%1716 = torch.prim.ListConstruct %1714, %1715 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_519 = torch.constant.int 0
%1717 = torch.aten.cat %1716, %int0_519 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%1718 = torch.operator "onnx.Resize"(%1670, %none, %none, %1717) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%1719 = torch.prim.ListConstruct %1718, %1525 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,20,20],f32>) -> !torch.list<vtensor>
%int1_520 = torch.constant.int 1
%1720 = torch.aten.cat %1719, %int1_520 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%1721 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1722 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_521 = torch.constant.int 12
%1723 = torch.aten.item %1721 : !torch.vtensor<[],f32> -> !torch.float
%1724 = torch.aten.item %1722 : !torch.vtensor<[],si8> -> !torch.int
%1725 = torch.aten.quantize_per_tensor %1720, %1723, %1724, %int12_521 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%1726 = torch.aten.int_repr %1725 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%1727 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1729 = torch.aten.item %1727 : !torch.vtensor<[],f32> -> !torch.float
%1730 = torch.aten.item %1728 : !torch.vtensor<[],si8> -> !torch.int
%1731 = torch.aten._make_per_tensor_quantized_tensor %1726, %1729, %1730 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%1732 = torch.aten.dequantize.self %1731 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%1733 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_522 = torch.constant.int 12
%1735 = torch.aten.item %1733 : !torch.vtensor<[],f32> -> !torch.float
%1736 = torch.aten.item %1734 : !torch.vtensor<[],si8> -> !torch.int
%1737 = torch.aten.quantize_per_tensor %44, %1735, %1736, %int12_522 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1738 = torch.aten.int_repr %1737 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%1739 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1741 = torch.aten.item %1739 : !torch.vtensor<[],f32> -> !torch.float
%1742 = torch.aten.item %1740 : !torch.vtensor<[],si8> -> !torch.int
%1743 = torch.aten._make_per_tensor_quantized_tensor %1738, %1741, %1742 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1744 = torch.aten.dequantize.self %1743 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%1745 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1746 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_523 = torch.constant.int 12
%1747 = torch.aten.item %1745 : !torch.vtensor<[],f32> -> !torch.float
%1748 = torch.aten.item %1746 : !torch.vtensor<[],si8> -> !torch.int
%1749 = torch.aten.quantize_per_tensor %45, %1747, %1748, %int12_523 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1750 = torch.aten.int_repr %1749 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1751 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1752 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1753 = torch.aten.item %1751 : !torch.vtensor<[],f32> -> !torch.float
%1754 = torch.aten.item %1752 : !torch.vtensor<[],si8> -> !torch.int
%1755 = torch.aten._make_per_tensor_quantized_tensor %1750, %1753, %1754 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1756 = torch.aten.dequantize.self %1755 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_524 = torch.constant.int 1
%int1_525 = torch.constant.int 1
%int1_526 = torch.constant.int 1
%int1_527 = torch.constant.int 1
%int1_528 = torch.constant.int 1
%int1_529 = torch.constant.int 1
%int0_530 = torch.constant.int 0
%1757 = torch.prim.ListConstruct %int1_524, %int1_525 : (!torch.int, !torch.int) -> !torch.list<int>
%1758 = torch.prim.ListConstruct %int1_526, %int1_527 : (!torch.int, !torch.int) -> !torch.list<int>
%1759 = torch.prim.ListConstruct %int1_528, %int1_529 : (!torch.int, !torch.int) -> !torch.list<int>
%1760 = torch.prim.ListConstruct %int0_530, %int0_530 : (!torch.int, !torch.int) -> !torch.list<int>
%false_531 = torch.constant.bool false
%int1_532 = torch.constant.int 1
%1761 = torch.aten.convolution %1732, %1744, %1756, %1759, %1757, %1758, %false_531, %1760, %int1_532 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%1762 = torch.aten.relu %1761 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%1763 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1764 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_533 = torch.constant.int 12
%1765 = torch.aten.item %1763 : !torch.vtensor<[],f32> -> !torch.float
%1766 = torch.aten.item %1764 : !torch.vtensor<[],si8> -> !torch.int
%1767 = torch.aten.quantize_per_tensor %1762, %1765, %1766, %int12_533 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1768 = torch.aten.int_repr %1767 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%1769 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1771 = torch.aten.item %1769 : !torch.vtensor<[],f32> -> !torch.float
%1772 = torch.aten.item %1770 : !torch.vtensor<[],si8> -> !torch.int
%1773 = torch.aten._make_per_tensor_quantized_tensor %1768, %1771, %1772 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%1774 = torch.aten.dequantize.self %1773 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%1775 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%1776 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%1777 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_534 = torch.constant.int 0
%int0_535 = torch.constant.int 0
%int0_536 = torch.constant.int 0
%1778 = torch.aten.select.int %1777, %int0_534, %int0_536 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1779 = torch.aten.item %1778 : !torch.vtensor<[1],si64> -> !torch.int
%1780 = torch.aten.lt.int %1779, %int0_534 : !torch.int, !torch.int -> !torch.bool
%1781 = torch.aten.Int.bool %1780 : !torch.bool -> !torch.int
%1782 = torch.aten.mul.int %1781, %int0_535 : !torch.int, !torch.int -> !torch.int
%1783 = torch.aten.add.int %1779, %1782 : !torch.int, !torch.int -> !torch.int
%1784 = torch.prim.ListConstruct %1783 : (!torch.int) -> !torch.list<int>
%false_537 = torch.constant.bool false
%none_538 = torch.constant.none
%1785 = torch.aten.tensor %1784, %none_538, %none_538, %false_537 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_539, %indices_540 = torch.aten.sort %1785, %int0_534, %false_537 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_541 = torch.constant.int 0
%1786 = torch.aten.select.int %values_539, %int0_534, %int0_541 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1787 = torch.aten.item %1786 : !torch.vtensor<[1],si64> -> !torch.int
%1788 = torch.aten.unsqueeze %1775, %1787 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1789 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_542 = torch.constant.int 0
%int0_543 = torch.constant.int 0
%int0_544 = torch.constant.int 0
%1790 = torch.aten.select.int %1789, %int0_542, %int0_544 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1791 = torch.aten.item %1790 : !torch.vtensor<[1],si64> -> !torch.int
%1792 = torch.aten.lt.int %1791, %int0_542 : !torch.int, !torch.int -> !torch.bool
%1793 = torch.aten.Int.bool %1792 : !torch.bool -> !torch.int
%1794 = torch.aten.mul.int %1793, %int0_543 : !torch.int, !torch.int -> !torch.int
%1795 = torch.aten.add.int %1791, %1794 : !torch.int, !torch.int -> !torch.int
%1796 = torch.prim.ListConstruct %1795 : (!torch.int) -> !torch.list<int>
%false_545 = torch.constant.bool false
%none_546 = torch.constant.none
%1797 = torch.aten.tensor %1796, %none_546, %none_546, %false_545 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_547, %indices_548 = torch.aten.sort %1797, %int0_542, %false_545 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_549 = torch.constant.int 0
%1798 = torch.aten.select.int %values_547, %int0_542, %int0_549 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1799 = torch.aten.item %1798 : !torch.vtensor<[1],si64> -> !torch.int
%1800 = torch.aten.unsqueeze %1776, %1799 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1801 = torch.prim.ListConstruct %1788, %1800 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_550 = torch.constant.int 0
%1802 = torch.aten.cat %1801, %int0_550 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%1803 = torch.aten._shape_as_tensor %1774 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[4],si64>
%1804 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1805 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1806 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_551 = torch.constant.none
%int1_552 = torch.constant.int 1
%1807 = torch.prim.ListConstruct %int1_552 : (!torch.int) -> !torch.list<int>
%1808 = torch.aten.ones %1807, %none_551, %none_551, %none_551, %none_551 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_553 = torch.constant.int 0
%int0_554 = torch.constant.int 0
%1809 = torch.prim.NumToTensor.Scalar %int0_554 : !torch.int -> !torch.vtensor<[1],si64>
%1810 = torch.aten.index_select %1805, %int0_553, %1809 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1811 = torch.aten.item %1810 : !torch.vtensor<[1],si64> -> !torch.int
%1812 = torch.aten.index_select %1806, %int0_553, %1809 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1813 = torch.aten.item %1812 : !torch.vtensor<[1],si64> -> !torch.int
%1814 = torch.aten.index_select %1804, %int0_553, %1809 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1815 = torch.aten.item %1814 : !torch.vtensor<[1],si64> -> !torch.int
%1816 = torch.aten.index_select %1808, %int0_553, %1809 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1817 = torch.aten.item %1816 : !torch.vtensor<[1],si64> -> !torch.int
%1818 = torch.aten.slice.Tensor %1803, %1815, %1811, %1813, %1817 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_555 = torch.constant.int 4
%none_556 = torch.constant.none
%false_557 = torch.constant.bool false
%1819 = torch.aten.to.dtype %1802, %int4_555, %false_557, %false_557, %none_556 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%1820 = torch.prim.ListConstruct %1818, %1819 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_558 = torch.constant.int 0
%1821 = torch.aten.cat %1820, %int0_558 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%1822 = torch.operator "onnx.Resize"(%1774, %none, %none, %1821) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%1823 = torch.prim.ListConstruct %1822, %1466 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,40,40],f32>) -> !torch.list<vtensor>
%int1_559 = torch.constant.int 1
%1824 = torch.aten.cat %1823, %int1_559 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%1825 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_560 = torch.constant.int 12
%1827 = torch.aten.item %1825 : !torch.vtensor<[],f32> -> !torch.float
%1828 = torch.aten.item %1826 : !torch.vtensor<[],si8> -> !torch.int
%1829 = torch.aten.quantize_per_tensor %1824, %1827, %1828, %int12_560 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%1830 = torch.aten.int_repr %1829 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%1831 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1833 = torch.aten.item %1831 : !torch.vtensor<[],f32> -> !torch.float
%1834 = torch.aten.item %1832 : !torch.vtensor<[],si8> -> !torch.int
%1835 = torch.aten._make_per_tensor_quantized_tensor %1830, %1833, %1834 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%1836 = torch.aten.dequantize.self %1835 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%1837 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_561 = torch.constant.int 12
%1839 = torch.aten.item %1837 : !torch.vtensor<[],f32> -> !torch.float
%1840 = torch.aten.item %1838 : !torch.vtensor<[],si8> -> !torch.int
%1841 = torch.aten.quantize_per_tensor %46, %1839, %1840, %int12_561 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1842 = torch.aten.int_repr %1841 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%1843 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1844 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1845 = torch.aten.item %1843 : !torch.vtensor<[],f32> -> !torch.float
%1846 = torch.aten.item %1844 : !torch.vtensor<[],si8> -> !torch.int
%1847 = torch.aten._make_per_tensor_quantized_tensor %1842, %1845, %1846 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1848 = torch.aten.dequantize.self %1847 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%1849 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1850 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_562 = torch.constant.int 12
%1851 = torch.aten.item %1849 : !torch.vtensor<[],f32> -> !torch.float
%1852 = torch.aten.item %1850 : !torch.vtensor<[],si8> -> !torch.int
%1853 = torch.aten.quantize_per_tensor %47, %1851, %1852, %int12_562 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1854 = torch.aten.int_repr %1853 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1855 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1856 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1857 = torch.aten.item %1855 : !torch.vtensor<[],f32> -> !torch.float
%1858 = torch.aten.item %1856 : !torch.vtensor<[],si8> -> !torch.int
%1859 = torch.aten._make_per_tensor_quantized_tensor %1854, %1857, %1858 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1860 = torch.aten.dequantize.self %1859 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_563 = torch.constant.int 1
%int1_564 = torch.constant.int 1
%int1_565 = torch.constant.int 1
%int1_566 = torch.constant.int 1
%int1_567 = torch.constant.int 1
%int1_568 = torch.constant.int 1
%int0_569 = torch.constant.int 0
%1861 = torch.prim.ListConstruct %int1_563, %int1_564 : (!torch.int, !torch.int) -> !torch.list<int>
%1862 = torch.prim.ListConstruct %int1_565, %int1_566 : (!torch.int, !torch.int) -> !torch.list<int>
%1863 = torch.prim.ListConstruct %int1_567, %int1_568 : (!torch.int, !torch.int) -> !torch.list<int>
%1864 = torch.prim.ListConstruct %int0_569, %int0_569 : (!torch.int, !torch.int) -> !torch.list<int>
%false_570 = torch.constant.bool false
%int1_571 = torch.constant.int 1
%1865 = torch.aten.convolution %1836, %1848, %1860, %1863, %1861, %1862, %false_570, %1864, %int1_571 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%1866 = torch.aten.relu %1865 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%1867 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1868 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_572 = torch.constant.int 12
%1869 = torch.aten.item %1867 : !torch.vtensor<[],f32> -> !torch.float
%1870 = torch.aten.item %1868 : !torch.vtensor<[],si8> -> !torch.int
%1871 = torch.aten.quantize_per_tensor %1866, %1869, %1870, %int12_572 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1872 = torch.aten.int_repr %1871 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%1873 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1874 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1875 = torch.aten.item %1873 : !torch.vtensor<[],f32> -> !torch.float
%1876 = torch.aten.item %1874 : !torch.vtensor<[],si8> -> !torch.int
%1877 = torch.aten._make_per_tensor_quantized_tensor %1872, %1875, %1876 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%1878 = torch.aten.dequantize.self %1877 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%1879 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%1880 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%1881 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_573 = torch.constant.int 0
%int0_574 = torch.constant.int 0
%int0_575 = torch.constant.int 0
%1882 = torch.aten.select.int %1881, %int0_573, %int0_575 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1883 = torch.aten.item %1882 : !torch.vtensor<[1],si64> -> !torch.int
%1884 = torch.aten.lt.int %1883, %int0_573 : !torch.int, !torch.int -> !torch.bool
%1885 = torch.aten.Int.bool %1884 : !torch.bool -> !torch.int
%1886 = torch.aten.mul.int %1885, %int0_574 : !torch.int, !torch.int -> !torch.int
%1887 = torch.aten.add.int %1883, %1886 : !torch.int, !torch.int -> !torch.int
%1888 = torch.prim.ListConstruct %1887 : (!torch.int) -> !torch.list<int>
%false_576 = torch.constant.bool false
%none_577 = torch.constant.none
%1889 = torch.aten.tensor %1888, %none_577, %none_577, %false_576 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_578, %indices_579 = torch.aten.sort %1889, %int0_573, %false_576 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_580 = torch.constant.int 0
%1890 = torch.aten.select.int %values_578, %int0_573, %int0_580 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1891 = torch.aten.item %1890 : !torch.vtensor<[1],si64> -> !torch.int
%1892 = torch.aten.unsqueeze %1879, %1891 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1893 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_581 = torch.constant.int 0
%int0_582 = torch.constant.int 0
%int0_583 = torch.constant.int 0
%1894 = torch.aten.select.int %1893, %int0_581, %int0_583 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1895 = torch.aten.item %1894 : !torch.vtensor<[1],si64> -> !torch.int
%1896 = torch.aten.lt.int %1895, %int0_581 : !torch.int, !torch.int -> !torch.bool
%1897 = torch.aten.Int.bool %1896 : !torch.bool -> !torch.int
%1898 = torch.aten.mul.int %1897, %int0_582 : !torch.int, !torch.int -> !torch.int
%1899 = torch.aten.add.int %1895, %1898 : !torch.int, !torch.int -> !torch.int
%1900 = torch.prim.ListConstruct %1899 : (!torch.int) -> !torch.list<int>
%false_584 = torch.constant.bool false
%none_585 = torch.constant.none
%1901 = torch.aten.tensor %1900, %none_585, %none_585, %false_584 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_586, %indices_587 = torch.aten.sort %1901, %int0_581, %false_584 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_588 = torch.constant.int 0
%1902 = torch.aten.select.int %values_586, %int0_581, %int0_588 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1903 = torch.aten.item %1902 : !torch.vtensor<[1],si64> -> !torch.int
%1904 = torch.aten.unsqueeze %1880, %1903 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1905 = torch.prim.ListConstruct %1892, %1904 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_589 = torch.constant.int 0
%1906 = torch.aten.cat %1905, %int0_589 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%1907 = torch.aten._shape_as_tensor %1878 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[4],si64>
%1908 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1909 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%1910 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_590 = torch.constant.none
%int1_591 = torch.constant.int 1
%1911 = torch.prim.ListConstruct %int1_591 : (!torch.int) -> !torch.list<int>
%1912 = torch.aten.ones %1911, %none_590, %none_590, %none_590, %none_590 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_592 = torch.constant.int 0
%int0_593 = torch.constant.int 0
%1913 = torch.prim.NumToTensor.Scalar %int0_593 : !torch.int -> !torch.vtensor<[1],si64>
%1914 = torch.aten.index_select %1909, %int0_592, %1913 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1915 = torch.aten.item %1914 : !torch.vtensor<[1],si64> -> !torch.int
%1916 = torch.aten.index_select %1910, %int0_592, %1913 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1917 = torch.aten.item %1916 : !torch.vtensor<[1],si64> -> !torch.int
%1918 = torch.aten.index_select %1908, %int0_592, %1913 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1919 = torch.aten.item %1918 : !torch.vtensor<[1],si64> -> !torch.int
%1920 = torch.aten.index_select %1912, %int0_592, %1913 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%1921 = torch.aten.item %1920 : !torch.vtensor<[1],si64> -> !torch.int
%1922 = torch.aten.slice.Tensor %1907, %1919, %1915, %1917, %1921 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_594 = torch.constant.int 4
%none_595 = torch.constant.none
%false_596 = torch.constant.bool false
%1923 = torch.aten.to.dtype %1906, %int4_594, %false_596, %false_596, %none_595 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%1924 = torch.prim.ListConstruct %1922, %1923 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_597 = torch.constant.int 0
%1925 = torch.aten.cat %1924, %int0_597 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%1926 = torch.operator "onnx.Resize"(%1878, %none, %none, %1925) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%1927 = torch.prim.ListConstruct %1926, %1407 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,80,80],f32>) -> !torch.list<vtensor>
%int1_598 = torch.constant.int 1
%1928 = torch.aten.cat %1927, %int1_598 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%1929 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1930 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_599 = torch.constant.int 12
%1931 = torch.aten.item %1929 : !torch.vtensor<[],f32> -> !torch.float
%1932 = torch.aten.item %1930 : !torch.vtensor<[],si8> -> !torch.int
%1933 = torch.aten.quantize_per_tensor %1928, %1931, %1932, %int12_599 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%1934 = torch.aten.int_repr %1933 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%1935 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1936 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1937 = torch.aten.item %1935 : !torch.vtensor<[],f32> -> !torch.float
%1938 = torch.aten.item %1936 : !torch.vtensor<[],si8> -> !torch.int
%1939 = torch.aten._make_per_tensor_quantized_tensor %1934, %1937, %1938 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%1940 = torch.aten.dequantize.self %1939 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%1941 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1942 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_600 = torch.constant.int 12
%1943 = torch.aten.item %1941 : !torch.vtensor<[],f32> -> !torch.float
%1944 = torch.aten.item %1942 : !torch.vtensor<[],si8> -> !torch.int
%1945 = torch.aten.quantize_per_tensor %48, %1943, %1944, %int12_600 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1946 = torch.aten.int_repr %1945 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%1947 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%1948 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1949 = torch.aten.item %1947 : !torch.vtensor<[],f32> -> !torch.float
%1950 = torch.aten.item %1948 : !torch.vtensor<[],si8> -> !torch.int
%1951 = torch.aten._make_per_tensor_quantized_tensor %1946, %1949, %1950 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%1952 = torch.aten.dequantize.self %1951 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%1953 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1954 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_601 = torch.constant.int 12
%1955 = torch.aten.item %1953 : !torch.vtensor<[],f32> -> !torch.float
%1956 = torch.aten.item %1954 : !torch.vtensor<[],si8> -> !torch.int
%1957 = torch.aten.quantize_per_tensor %49, %1955, %1956, %int12_601 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1958 = torch.aten.int_repr %1957 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1959 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1960 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1961 = torch.aten.item %1959 : !torch.vtensor<[],f32> -> !torch.float
%1962 = torch.aten.item %1960 : !torch.vtensor<[],si8> -> !torch.int
%1963 = torch.aten._make_per_tensor_quantized_tensor %1958, %1961, %1962 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1964 = torch.aten.dequantize.self %1963 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_602 = torch.constant.int 1
%int1_603 = torch.constant.int 1
%int1_604 = torch.constant.int 1
%int1_605 = torch.constant.int 1
%int1_606 = torch.constant.int 1
%int1_607 = torch.constant.int 1
%int0_608 = torch.constant.int 0
%1965 = torch.prim.ListConstruct %int1_602, %int1_603 : (!torch.int, !torch.int) -> !torch.list<int>
%1966 = torch.prim.ListConstruct %int1_604, %int1_605 : (!torch.int, !torch.int) -> !torch.list<int>
%1967 = torch.prim.ListConstruct %int1_606, %int1_607 : (!torch.int, !torch.int) -> !torch.list<int>
%1968 = torch.prim.ListConstruct %int0_608, %int0_608 : (!torch.int, !torch.int) -> !torch.list<int>
%false_609 = torch.constant.bool false
%int1_610 = torch.constant.int 1
%1969 = torch.aten.convolution %1940, %1952, %1964, %1967, %1965, %1966, %false_609, %1968, %int1_610 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%1970 = torch.aten.relu %1969 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%1971 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1972 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_611 = torch.constant.int 12
%1973 = torch.aten.item %1971 : !torch.vtensor<[],f32> -> !torch.float
%1974 = torch.aten.item %1972 : !torch.vtensor<[],si8> -> !torch.int
%1975 = torch.aten.quantize_per_tensor %1970, %1973, %1974, %int12_611 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1976 = torch.aten.int_repr %1975 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%1977 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1978 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1979 = torch.aten.item %1977 : !torch.vtensor<[],f32> -> !torch.float
%1980 = torch.aten.item %1978 : !torch.vtensor<[],si8> -> !torch.int
%1981 = torch.aten._make_per_tensor_quantized_tensor %1976, %1979, %1980 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%1982 = torch.aten.dequantize.self %1981 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%1983 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%1984 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%1985 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_612 = torch.constant.int 0
%int0_613 = torch.constant.int 0
%int0_614 = torch.constant.int 0
%1986 = torch.aten.select.int %1985, %int0_612, %int0_614 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1987 = torch.aten.item %1986 : !torch.vtensor<[1],si64> -> !torch.int
%1988 = torch.aten.lt.int %1987, %int0_612 : !torch.int, !torch.int -> !torch.bool
%1989 = torch.aten.Int.bool %1988 : !torch.bool -> !torch.int
%1990 = torch.aten.mul.int %1989, %int0_613 : !torch.int, !torch.int -> !torch.int
%1991 = torch.aten.add.int %1987, %1990 : !torch.int, !torch.int -> !torch.int
%1992 = torch.prim.ListConstruct %1991 : (!torch.int) -> !torch.list<int>
%false_615 = torch.constant.bool false
%none_616 = torch.constant.none
%1993 = torch.aten.tensor %1992, %none_616, %none_616, %false_615 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_617, %indices_618 = torch.aten.sort %1993, %int0_612, %false_615 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_619 = torch.constant.int 0
%1994 = torch.aten.select.int %values_617, %int0_612, %int0_619 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1995 = torch.aten.item %1994 : !torch.vtensor<[1],si64> -> !torch.int
%1996 = torch.aten.unsqueeze %1983, %1995 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%1997 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_620 = torch.constant.int 0
%int0_621 = torch.constant.int 0
%int0_622 = torch.constant.int 0
%1998 = torch.aten.select.int %1997, %int0_620, %int0_622 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%1999 = torch.aten.item %1998 : !torch.vtensor<[1],si64> -> !torch.int
%2000 = torch.aten.lt.int %1999, %int0_620 : !torch.int, !torch.int -> !torch.bool
%2001 = torch.aten.Int.bool %2000 : !torch.bool -> !torch.int
%2002 = torch.aten.mul.int %2001, %int0_621 : !torch.int, !torch.int -> !torch.int
%2003 = torch.aten.add.int %1999, %2002 : !torch.int, !torch.int -> !torch.int
%2004 = torch.prim.ListConstruct %2003 : (!torch.int) -> !torch.list<int>
%false_623 = torch.constant.bool false
%none_624 = torch.constant.none
%2005 = torch.aten.tensor %2004, %none_624, %none_624, %false_623 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_625, %indices_626 = torch.aten.sort %2005, %int0_620, %false_623 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_627 = torch.constant.int 0
%2006 = torch.aten.select.int %values_625, %int0_620, %int0_627 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2007 = torch.aten.item %2006 : !torch.vtensor<[1],si64> -> !torch.int
%2008 = torch.aten.unsqueeze %1984, %2007 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2009 = torch.prim.ListConstruct %1996, %2008 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_628 = torch.constant.int 0
%2010 = torch.aten.cat %2009, %int0_628 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2011 = torch.aten._shape_as_tensor %1982 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[4],si64>
%2012 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2013 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2014 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_629 = torch.constant.none
%int1_630 = torch.constant.int 1
%2015 = torch.prim.ListConstruct %int1_630 : (!torch.int) -> !torch.list<int>
%2016 = torch.aten.ones %2015, %none_629, %none_629, %none_629, %none_629 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_631 = torch.constant.int 0
%int0_632 = torch.constant.int 0
%2017 = torch.prim.NumToTensor.Scalar %int0_632 : !torch.int -> !torch.vtensor<[1],si64>
%2018 = torch.aten.index_select %2013, %int0_631, %2017 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2019 = torch.aten.item %2018 : !torch.vtensor<[1],si64> -> !torch.int
%2020 = torch.aten.index_select %2014, %int0_631, %2017 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2021 = torch.aten.item %2020 : !torch.vtensor<[1],si64> -> !torch.int
%2022 = torch.aten.index_select %2012, %int0_631, %2017 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2023 = torch.aten.item %2022 : !torch.vtensor<[1],si64> -> !torch.int
%2024 = torch.aten.index_select %2016, %int0_631, %2017 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2025 = torch.aten.item %2024 : !torch.vtensor<[1],si64> -> !torch.int
%2026 = torch.aten.slice.Tensor %2011, %2023, %2019, %2021, %2025 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_633 = torch.constant.int 4
%none_634 = torch.constant.none
%false_635 = torch.constant.bool false
%2027 = torch.aten.to.dtype %2010, %int4_633, %false_635, %false_635, %none_634 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%2028 = torch.prim.ListConstruct %2026, %2027 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_636 = torch.constant.int 0
%2029 = torch.aten.cat %2028, %int0_636 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%2030 = torch.operator "onnx.Resize"(%1982, %none, %none, %2029) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,80,80],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%2031 = torch.prim.ListConstruct %2030, %1348 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,160,160],f32>) -> !torch.list<vtensor>
%int1_637 = torch.constant.int 1
%2032 = torch.aten.cat %2031, %int1_637 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,160,160],f32>
%2033 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2034 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_638 = torch.constant.int 12
%2035 = torch.aten.item %2033 : !torch.vtensor<[],f32> -> !torch.float
%2036 = torch.aten.item %2034 : !torch.vtensor<[],si8> -> !torch.int
%2037 = torch.aten.quantize_per_tensor %2032, %2035, %2036, %int12_638 : !torch.vtensor<[1,?,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%2038 = torch.aten.int_repr %2037 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],si8>
%2039 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2040 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2041 = torch.aten.item %2039 : !torch.vtensor<[],f32> -> !torch.float
%2042 = torch.aten.item %2040 : !torch.vtensor<[],si8> -> !torch.int
%2043 = torch.aten._make_per_tensor_quantized_tensor %2038, %2041, %2042 : !torch.vtensor<[1,?,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%2044 = torch.aten.dequantize.self %2043 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],f32>
%2045 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2046 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_639 = torch.constant.int 12
%2047 = torch.aten.item %2045 : !torch.vtensor<[],f32> -> !torch.float
%2048 = torch.aten.item %2046 : !torch.vtensor<[],si8> -> !torch.int
%2049 = torch.aten.quantize_per_tensor %50, %2047, %2048, %int12_639 : !torch.vtensor<[128,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%2050 = torch.aten.int_repr %2049 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],si8>
%2051 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2052 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2053 = torch.aten.item %2051 : !torch.vtensor<[],f32> -> !torch.float
%2054 = torch.aten.item %2052 : !torch.vtensor<[],si8> -> !torch.int
%2055 = torch.aten._make_per_tensor_quantized_tensor %2050, %2053, %2054 : !torch.vtensor<[128,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%2056 = torch.aten.dequantize.self %2055 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],f32>
%2057 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2058 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_640 = torch.constant.int 12
%2059 = torch.aten.item %2057 : !torch.vtensor<[],f32> -> !torch.float
%2060 = torch.aten.item %2058 : !torch.vtensor<[],si8> -> !torch.int
%2061 = torch.aten.quantize_per_tensor %51, %2059, %2060, %int12_640 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2062 = torch.aten.int_repr %2061 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%2063 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2064 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2065 = torch.aten.item %2063 : !torch.vtensor<[],f32> -> !torch.float
%2066 = torch.aten.item %2064 : !torch.vtensor<[],si8> -> !torch.int
%2067 = torch.aten._make_per_tensor_quantized_tensor %2062, %2065, %2066 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2068 = torch.aten.dequantize.self %2067 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_641 = torch.constant.int 1
%int1_642 = torch.constant.int 1
%int1_643 = torch.constant.int 1
%int1_644 = torch.constant.int 1
%int1_645 = torch.constant.int 1
%int1_646 = torch.constant.int 1
%int0_647 = torch.constant.int 0
%2069 = torch.prim.ListConstruct %int1_641, %int1_642 : (!torch.int, !torch.int) -> !torch.list<int>
%2070 = torch.prim.ListConstruct %int1_643, %int1_644 : (!torch.int, !torch.int) -> !torch.list<int>
%2071 = torch.prim.ListConstruct %int1_645, %int1_646 : (!torch.int, !torch.int) -> !torch.list<int>
%2072 = torch.prim.ListConstruct %int0_647, %int0_647 : (!torch.int, !torch.int) -> !torch.list<int>
%false_648 = torch.constant.bool false
%int1_649 = torch.constant.int 1
%2073 = torch.aten.convolution %2044, %2056, %2068, %2071, %2069, %2070, %false_648, %2072, %int1_649 : !torch.vtensor<[1,?,160,160],f32>, !torch.vtensor<[128,64,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,160,160],f32>
%2074 = torch.aten.relu %2073 : !torch.vtensor<[1,128,160,160],f32> -> !torch.vtensor<[1,128,160,160],f32>
%2075 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2076 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_650 = torch.constant.int 12
%2077 = torch.aten.item %2075 : !torch.vtensor<[],f32> -> !torch.float
%2078 = torch.aten.item %2076 : !torch.vtensor<[],si8> -> !torch.int
%2079 = torch.aten.quantize_per_tensor %2074, %2077, %2078, %int12_650 : !torch.vtensor<[1,128,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%2080 = torch.aten.int_repr %2079 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],si8>
%2081 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2082 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2083 = torch.aten.item %2081 : !torch.vtensor<[],f32> -> !torch.float
%2084 = torch.aten.item %2082 : !torch.vtensor<[],si8> -> !torch.int
%2085 = torch.aten._make_per_tensor_quantized_tensor %2080, %2083, %2084 : !torch.vtensor<[1,128,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%2086 = torch.aten.dequantize.self %2085 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],f32>
%int1_651 = torch.constant.int 1
%2087 = torch.aten.add.Tensor %2086, %1306, %int1_651 : !torch.vtensor<[1,128,160,160],f32>, !torch.vtensor<[1,128,160,160],f32>, !torch.int -> !torch.vtensor<[1,128,160,160],f32>
%2088 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2089 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_652 = torch.constant.int 12
%2090 = torch.aten.item %2088 : !torch.vtensor<[],f32> -> !torch.float
%2091 = torch.aten.item %2089 : !torch.vtensor<[],si8> -> !torch.int
%2092 = torch.aten.quantize_per_tensor %2087, %2090, %2091, %int12_652 : !torch.vtensor<[1,128,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%2093 = torch.aten.int_repr %2092 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],si8>
%2094 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2095 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2096 = torch.aten.item %2094 : !torch.vtensor<[],f32> -> !torch.float
%2097 = torch.aten.item %2095 : !torch.vtensor<[],si8> -> !torch.int
%2098 = torch.aten._make_per_tensor_quantized_tensor %2093, %2096, %2097 : !torch.vtensor<[1,128,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,160,160],!torch.qint8>
%2099 = torch.aten.dequantize.self %2098 : !torch.vtensor<[1,128,160,160],!torch.qint8> -> !torch.vtensor<[1,128,160,160],f32>
%int2_653 = torch.constant.int 2
%int2_654 = torch.constant.int 2
%2100 = torch.prim.ListConstruct %int2_653, %int2_654 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_655 = torch.constant.int 0
%int0_656 = torch.constant.int 0
%2101 = torch.prim.ListConstruct %int0_655, %int0_656 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_657 = torch.constant.int 2
%int2_658 = torch.constant.int 2
%2102 = torch.prim.ListConstruct %int2_657, %int2_658 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_659 = torch.constant.int 1
%int1_660 = torch.constant.int 1
%2103 = torch.prim.ListConstruct %int1_659, %int1_660 : (!torch.int, !torch.int) -> !torch.list<int>
%true_661 = torch.constant.bool true
%2104 = torch.aten.max_pool2d %2099, %2100, %2102, %2101, %2103, %true_661 : !torch.vtensor<[1,128,160,160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,128,80,80],f32>
%2105 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2106 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_662 = torch.constant.int 12
%2107 = torch.aten.item %2105 : !torch.vtensor<[],f32> -> !torch.float
%2108 = torch.aten.item %2106 : !torch.vtensor<[],si8> -> !torch.int
%2109 = torch.aten.quantize_per_tensor %2104, %2107, %2108, %int12_662 : !torch.vtensor<[1,128,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%2110 = torch.aten.int_repr %2109 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],si8>
%2111 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2112 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2113 = torch.aten.item %2111 : !torch.vtensor<[],f32> -> !torch.float
%2114 = torch.aten.item %2112 : !torch.vtensor<[],si8> -> !torch.int
%2115 = torch.aten._make_per_tensor_quantized_tensor %2110, %2113, %2114 : !torch.vtensor<[1,128,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%2116 = torch.aten.dequantize.self %2115 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],f32>
%2117 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2118 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_663 = torch.constant.int 12
%2119 = torch.aten.item %2117 : !torch.vtensor<[],f32> -> !torch.float
%2120 = torch.aten.item %2118 : !torch.vtensor<[],si8> -> !torch.int
%2121 = torch.aten.quantize_per_tensor %52, %2119, %2120, %int12_663 : !torch.vtensor<[256,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%2122 = torch.aten.int_repr %2121 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],si8>
%2123 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2124 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2125 = torch.aten.item %2123 : !torch.vtensor<[],f32> -> !torch.float
%2126 = torch.aten.item %2124 : !torch.vtensor<[],si8> -> !torch.int
%2127 = torch.aten._make_per_tensor_quantized_tensor %2122, %2125, %2126 : !torch.vtensor<[256,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%2128 = torch.aten.dequantize.self %2127 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],f32>
%2129 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2130 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_664 = torch.constant.int 12
%2131 = torch.aten.item %2129 : !torch.vtensor<[],f32> -> !torch.float
%2132 = torch.aten.item %2130 : !torch.vtensor<[],si8> -> !torch.int
%2133 = torch.aten.quantize_per_tensor %53, %2131, %2132, %int12_664 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2134 = torch.aten.int_repr %2133 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2135 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2136 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2137 = torch.aten.item %2135 : !torch.vtensor<[],f32> -> !torch.float
%2138 = torch.aten.item %2136 : !torch.vtensor<[],si8> -> !torch.int
%2139 = torch.aten._make_per_tensor_quantized_tensor %2134, %2137, %2138 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2140 = torch.aten.dequantize.self %2139 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_665 = torch.constant.int 1
%int1_666 = torch.constant.int 1
%int1_667 = torch.constant.int 1
%int1_668 = torch.constant.int 1
%int1_669 = torch.constant.int 1
%int1_670 = torch.constant.int 1
%int0_671 = torch.constant.int 0
%2141 = torch.prim.ListConstruct %int1_665, %int1_666 : (!torch.int, !torch.int) -> !torch.list<int>
%2142 = torch.prim.ListConstruct %int1_667, %int1_668 : (!torch.int, !torch.int) -> !torch.list<int>
%2143 = torch.prim.ListConstruct %int1_669, %int1_670 : (!torch.int, !torch.int) -> !torch.list<int>
%2144 = torch.prim.ListConstruct %int0_671, %int0_671 : (!torch.int, !torch.int) -> !torch.list<int>
%false_672 = torch.constant.bool false
%int1_673 = torch.constant.int 1
%2145 = torch.aten.convolution %2116, %2128, %2140, %2143, %2141, %2142, %false_672, %2144, %int1_673 : !torch.vtensor<[1,128,80,80],f32>, !torch.vtensor<[256,128,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,80,80],f32>
%2146 = torch.aten.relu %2145 : !torch.vtensor<[1,256,80,80],f32> -> !torch.vtensor<[1,256,80,80],f32>
%2147 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2148 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_674 = torch.constant.int 12
%2149 = torch.aten.item %2147 : !torch.vtensor<[],f32> -> !torch.float
%2150 = torch.aten.item %2148 : !torch.vtensor<[],si8> -> !torch.int
%2151 = torch.aten.quantize_per_tensor %2146, %2149, %2150, %int12_674 : !torch.vtensor<[1,256,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2152 = torch.aten.int_repr %2151 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],si8>
%2153 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2154 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2155 = torch.aten.item %2153 : !torch.vtensor<[],f32> -> !torch.float
%2156 = torch.aten.item %2154 : !torch.vtensor<[],si8> -> !torch.int
%2157 = torch.aten._make_per_tensor_quantized_tensor %2152, %2155, %2156 : !torch.vtensor<[1,256,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2158 = torch.aten.dequantize.self %2157 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],f32>
%2159 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2160 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_675 = torch.constant.int 12
%2161 = torch.aten.item %2159 : !torch.vtensor<[],f32> -> !torch.float
%2162 = torch.aten.item %2160 : !torch.vtensor<[],si8> -> !torch.int
%2163 = torch.aten.quantize_per_tensor %54, %2161, %2162, %int12_675 : !torch.vtensor<[64,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,3,3],!torch.qint8>
%2164 = torch.aten.int_repr %2163 : !torch.vtensor<[64,256,3,3],!torch.qint8> -> !torch.vtensor<[64,256,3,3],si8>
%2165 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2166 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2167 = torch.aten.item %2165 : !torch.vtensor<[],f32> -> !torch.float
%2168 = torch.aten.item %2166 : !torch.vtensor<[],si8> -> !torch.int
%2169 = torch.aten._make_per_tensor_quantized_tensor %2164, %2167, %2168 : !torch.vtensor<[64,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,3,3],!torch.qint8>
%2170 = torch.aten.dequantize.self %2169 : !torch.vtensor<[64,256,3,3],!torch.qint8> -> !torch.vtensor<[64,256,3,3],f32>
%2171 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2172 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_676 = torch.constant.int 12
%2173 = torch.aten.item %2171 : !torch.vtensor<[],f32> -> !torch.float
%2174 = torch.aten.item %2172 : !torch.vtensor<[],si8> -> !torch.int
%2175 = torch.aten.quantize_per_tensor %55, %2173, %2174, %int12_676 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2176 = torch.aten.int_repr %2175 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2177 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2178 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2179 = torch.aten.item %2177 : !torch.vtensor<[],f32> -> !torch.float
%2180 = torch.aten.item %2178 : !torch.vtensor<[],si8> -> !torch.int
%2181 = torch.aten._make_per_tensor_quantized_tensor %2176, %2179, %2180 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2182 = torch.aten.dequantize.self %2181 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_677 = torch.constant.int 1
%int1_678 = torch.constant.int 1
%int1_679 = torch.constant.int 1
%int1_680 = torch.constant.int 1
%int1_681 = torch.constant.int 1
%int1_682 = torch.constant.int 1
%int0_683 = torch.constant.int 0
%2183 = torch.prim.ListConstruct %int1_677, %int1_678 : (!torch.int, !torch.int) -> !torch.list<int>
%2184 = torch.prim.ListConstruct %int1_679, %int1_680 : (!torch.int, !torch.int) -> !torch.list<int>
%2185 = torch.prim.ListConstruct %int1_681, %int1_682 : (!torch.int, !torch.int) -> !torch.list<int>
%2186 = torch.prim.ListConstruct %int0_683, %int0_683 : (!torch.int, !torch.int) -> !torch.list<int>
%false_684 = torch.constant.bool false
%int1_685 = torch.constant.int 1
%2187 = torch.aten.convolution %2158, %2170, %2182, %2185, %2183, %2184, %false_684, %2186, %int1_685 : !torch.vtensor<[1,256,80,80],f32>, !torch.vtensor<[64,256,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,80,80],f32>
%2188 = torch.aten.relu %2187 : !torch.vtensor<[1,64,80,80],f32> -> !torch.vtensor<[1,64,80,80],f32>
%2189 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2190 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_686 = torch.constant.int 12
%2191 = torch.aten.item %2189 : !torch.vtensor<[],f32> -> !torch.float
%2192 = torch.aten.item %2190 : !torch.vtensor<[],si8> -> !torch.int
%2193 = torch.aten.quantize_per_tensor %2188, %2191, %2192, %int12_686 : !torch.vtensor<[1,64,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,80,80],!torch.qint8>
%2194 = torch.aten.int_repr %2193 : !torch.vtensor<[1,64,80,80],!torch.qint8> -> !torch.vtensor<[1,64,80,80],si8>
%2195 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2196 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2197 = torch.aten.item %2195 : !torch.vtensor<[],f32> -> !torch.float
%2198 = torch.aten.item %2196 : !torch.vtensor<[],si8> -> !torch.int
%2199 = torch.aten._make_per_tensor_quantized_tensor %2194, %2197, %2198 : !torch.vtensor<[1,64,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,80,80],!torch.qint8>
%2200 = torch.aten.dequantize.self %2199 : !torch.vtensor<[1,64,80,80],!torch.qint8> -> !torch.vtensor<[1,64,80,80],f32>
%int2_687 = torch.constant.int 2
%int2_688 = torch.constant.int 2
%2201 = torch.prim.ListConstruct %int2_687, %int2_688 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_689 = torch.constant.int 0
%int0_690 = torch.constant.int 0
%2202 = torch.prim.ListConstruct %int0_689, %int0_690 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_691 = torch.constant.int 2
%int2_692 = torch.constant.int 2
%2203 = torch.prim.ListConstruct %int2_691, %int2_692 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_693 = torch.constant.int 1
%int1_694 = torch.constant.int 1
%2204 = torch.prim.ListConstruct %int1_693, %int1_694 : (!torch.int, !torch.int) -> !torch.list<int>
%true_695 = torch.constant.bool true
%2205 = torch.aten.max_pool2d %2200, %2201, %2203, %2202, %2204, %true_695 : !torch.vtensor<[1,64,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,40,40],f32>
%2206 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2207 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_696 = torch.constant.int 12
%2208 = torch.aten.item %2206 : !torch.vtensor<[],f32> -> !torch.float
%2209 = torch.aten.item %2207 : !torch.vtensor<[],si8> -> !torch.int
%2210 = torch.aten.quantize_per_tensor %2205, %2208, %2209, %int12_696 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2211 = torch.aten.int_repr %2210 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%2212 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2213 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2214 = torch.aten.item %2212 : !torch.vtensor<[],f32> -> !torch.float
%2215 = torch.aten.item %2213 : !torch.vtensor<[],si8> -> !torch.int
%2216 = torch.aten._make_per_tensor_quantized_tensor %2211, %2214, %2215 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2217 = torch.aten.dequantize.self %2216 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%2218 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2219 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_697 = torch.constant.int 12
%2220 = torch.aten.item %2218 : !torch.vtensor<[],f32> -> !torch.float
%2221 = torch.aten.item %2219 : !torch.vtensor<[],si8> -> !torch.int
%2222 = torch.aten.quantize_per_tensor %56, %2220, %2221, %int12_697 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2223 = torch.aten.int_repr %2222 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%2224 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2225 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2226 = torch.aten.item %2224 : !torch.vtensor<[],f32> -> !torch.float
%2227 = torch.aten.item %2225 : !torch.vtensor<[],si8> -> !torch.int
%2228 = torch.aten._make_per_tensor_quantized_tensor %2223, %2226, %2227 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2229 = torch.aten.dequantize.self %2228 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%2230 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2231 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_698 = torch.constant.int 12
%2232 = torch.aten.item %2230 : !torch.vtensor<[],f32> -> !torch.float
%2233 = torch.aten.item %2231 : !torch.vtensor<[],si8> -> !torch.int
%2234 = torch.aten.quantize_per_tensor %57, %2232, %2233, %int12_698 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2235 = torch.aten.int_repr %2234 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2236 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2237 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2238 = torch.aten.item %2236 : !torch.vtensor<[],f32> -> !torch.float
%2239 = torch.aten.item %2237 : !torch.vtensor<[],si8> -> !torch.int
%2240 = torch.aten._make_per_tensor_quantized_tensor %2235, %2238, %2239 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2241 = torch.aten.dequantize.self %2240 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_699 = torch.constant.int 1
%int1_700 = torch.constant.int 1
%int1_701 = torch.constant.int 1
%int1_702 = torch.constant.int 1
%int1_703 = torch.constant.int 1
%int1_704 = torch.constant.int 1
%int0_705 = torch.constant.int 0
%2242 = torch.prim.ListConstruct %int1_699, %int1_700 : (!torch.int, !torch.int) -> !torch.list<int>
%2243 = torch.prim.ListConstruct %int1_701, %int1_702 : (!torch.int, !torch.int) -> !torch.list<int>
%2244 = torch.prim.ListConstruct %int1_703, %int1_704 : (!torch.int, !torch.int) -> !torch.list<int>
%2245 = torch.prim.ListConstruct %int0_705, %int0_705 : (!torch.int, !torch.int) -> !torch.list<int>
%false_706 = torch.constant.bool false
%int1_707 = torch.constant.int 1
%2246 = torch.aten.convolution %2217, %2229, %2241, %2244, %2242, %2243, %false_706, %2245, %int1_707 : !torch.vtensor<[1,64,40,40],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,40,40],f32>
%2247 = torch.aten.relu %2246 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[1,64,40,40],f32>
%2248 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2249 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_708 = torch.constant.int 12
%2250 = torch.aten.item %2248 : !torch.vtensor<[],f32> -> !torch.float
%2251 = torch.aten.item %2249 : !torch.vtensor<[],si8> -> !torch.int
%2252 = torch.aten.quantize_per_tensor %2247, %2250, %2251, %int12_708 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2253 = torch.aten.int_repr %2252 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%2254 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2256 = torch.aten.item %2254 : !torch.vtensor<[],f32> -> !torch.float
%2257 = torch.aten.item %2255 : !torch.vtensor<[],si8> -> !torch.int
%2258 = torch.aten._make_per_tensor_quantized_tensor %2253, %2256, %2257 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2259 = torch.aten.dequantize.self %2258 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%int2_709 = torch.constant.int 2
%int2_710 = torch.constant.int 2
%2260 = torch.prim.ListConstruct %int2_709, %int2_710 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_711 = torch.constant.int 0
%int0_712 = torch.constant.int 0
%2261 = torch.prim.ListConstruct %int0_711, %int0_712 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_713 = torch.constant.int 2
%int2_714 = torch.constant.int 2
%2262 = torch.prim.ListConstruct %int2_713, %int2_714 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_715 = torch.constant.int 1
%int1_716 = torch.constant.int 1
%2263 = torch.prim.ListConstruct %int1_715, %int1_716 : (!torch.int, !torch.int) -> !torch.list<int>
%true_717 = torch.constant.bool true
%2264 = torch.aten.max_pool2d %2259, %2260, %2262, %2261, %2263, %true_717 : !torch.vtensor<[1,64,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,20,20],f32>
%2265 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2266 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_718 = torch.constant.int 12
%2267 = torch.aten.item %2265 : !torch.vtensor<[],f32> -> !torch.float
%2268 = torch.aten.item %2266 : !torch.vtensor<[],si8> -> !torch.int
%2269 = torch.aten.quantize_per_tensor %2264, %2267, %2268, %int12_718 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2270 = torch.aten.int_repr %2269 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%2271 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2272 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2273 = torch.aten.item %2271 : !torch.vtensor<[],f32> -> !torch.float
%2274 = torch.aten.item %2272 : !torch.vtensor<[],si8> -> !torch.int
%2275 = torch.aten._make_per_tensor_quantized_tensor %2270, %2273, %2274 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2276 = torch.aten.dequantize.self %2275 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%2277 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_719 = torch.constant.int 12
%2279 = torch.aten.item %2277 : !torch.vtensor<[],f32> -> !torch.float
%2280 = torch.aten.item %2278 : !torch.vtensor<[],si8> -> !torch.int
%2281 = torch.aten.quantize_per_tensor %58, %2279, %2280, %int12_719 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2282 = torch.aten.int_repr %2281 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%2283 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2285 = torch.aten.item %2283 : !torch.vtensor<[],f32> -> !torch.float
%2286 = torch.aten.item %2284 : !torch.vtensor<[],si8> -> !torch.int
%2287 = torch.aten._make_per_tensor_quantized_tensor %2282, %2285, %2286 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2288 = torch.aten.dequantize.self %2287 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%2289 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2290 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_720 = torch.constant.int 12
%2291 = torch.aten.item %2289 : !torch.vtensor<[],f32> -> !torch.float
%2292 = torch.aten.item %2290 : !torch.vtensor<[],si8> -> !torch.int
%2293 = torch.aten.quantize_per_tensor %59, %2291, %2292, %int12_720 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2294 = torch.aten.int_repr %2293 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2295 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2296 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2297 = torch.aten.item %2295 : !torch.vtensor<[],f32> -> !torch.float
%2298 = torch.aten.item %2296 : !torch.vtensor<[],si8> -> !torch.int
%2299 = torch.aten._make_per_tensor_quantized_tensor %2294, %2297, %2298 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2300 = torch.aten.dequantize.self %2299 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_721 = torch.constant.int 1
%int1_722 = torch.constant.int 1
%int1_723 = torch.constant.int 1
%int1_724 = torch.constant.int 1
%int1_725 = torch.constant.int 1
%int1_726 = torch.constant.int 1
%int0_727 = torch.constant.int 0
%2301 = torch.prim.ListConstruct %int1_721, %int1_722 : (!torch.int, !torch.int) -> !torch.list<int>
%2302 = torch.prim.ListConstruct %int1_723, %int1_724 : (!torch.int, !torch.int) -> !torch.list<int>
%2303 = torch.prim.ListConstruct %int1_725, %int1_726 : (!torch.int, !torch.int) -> !torch.list<int>
%2304 = torch.prim.ListConstruct %int0_727, %int0_727 : (!torch.int, !torch.int) -> !torch.list<int>
%false_728 = torch.constant.bool false
%int1_729 = torch.constant.int 1
%2305 = torch.aten.convolution %2276, %2288, %2300, %2303, %2301, %2302, %false_728, %2304, %int1_729 : !torch.vtensor<[1,64,20,20],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,20,20],f32>
%2306 = torch.aten.relu %2305 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[1,64,20,20],f32>
%2307 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2308 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_730 = torch.constant.int 12
%2309 = torch.aten.item %2307 : !torch.vtensor<[],f32> -> !torch.float
%2310 = torch.aten.item %2308 : !torch.vtensor<[],si8> -> !torch.int
%2311 = torch.aten.quantize_per_tensor %2306, %2309, %2310, %int12_730 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2312 = torch.aten.int_repr %2311 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%2313 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2314 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2315 = torch.aten.item %2313 : !torch.vtensor<[],f32> -> !torch.float
%2316 = torch.aten.item %2314 : !torch.vtensor<[],si8> -> !torch.int
%2317 = torch.aten._make_per_tensor_quantized_tensor %2312, %2315, %2316 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2318 = torch.aten.dequantize.self %2317 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%int2_731 = torch.constant.int 2
%int2_732 = torch.constant.int 2
%2319 = torch.prim.ListConstruct %int2_731, %int2_732 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_733 = torch.constant.int 0
%int0_734 = torch.constant.int 0
%2320 = torch.prim.ListConstruct %int0_733, %int0_734 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_735 = torch.constant.int 2
%int2_736 = torch.constant.int 2
%2321 = torch.prim.ListConstruct %int2_735, %int2_736 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_737 = torch.constant.int 1
%int1_738 = torch.constant.int 1
%2322 = torch.prim.ListConstruct %int1_737, %int1_738 : (!torch.int, !torch.int) -> !torch.list<int>
%true_739 = torch.constant.bool true
%2323 = torch.aten.max_pool2d %2318, %2319, %2321, %2320, %2322, %true_739 : !torch.vtensor<[1,64,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,10,10],f32>
%2324 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2325 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_740 = torch.constant.int 12
%2326 = torch.aten.item %2324 : !torch.vtensor<[],f32> -> !torch.float
%2327 = torch.aten.item %2325 : !torch.vtensor<[],si8> -> !torch.int
%2328 = torch.aten.quantize_per_tensor %2323, %2326, %2327, %int12_740 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2329 = torch.aten.int_repr %2328 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%2330 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2331 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2332 = torch.aten.item %2330 : !torch.vtensor<[],f32> -> !torch.float
%2333 = torch.aten.item %2331 : !torch.vtensor<[],si8> -> !torch.int
%2334 = torch.aten._make_per_tensor_quantized_tensor %2329, %2332, %2333 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2335 = torch.aten.dequantize.self %2334 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%2336 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2337 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_741 = torch.constant.int 12
%2338 = torch.aten.item %2336 : !torch.vtensor<[],f32> -> !torch.float
%2339 = torch.aten.item %2337 : !torch.vtensor<[],si8> -> !torch.int
%2340 = torch.aten.quantize_per_tensor %60, %2338, %2339, %int12_741 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2341 = torch.aten.int_repr %2340 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%2342 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2343 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2344 = torch.aten.item %2342 : !torch.vtensor<[],f32> -> !torch.float
%2345 = torch.aten.item %2343 : !torch.vtensor<[],si8> -> !torch.int
%2346 = torch.aten._make_per_tensor_quantized_tensor %2341, %2344, %2345 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2347 = torch.aten.dequantize.self %2346 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%2348 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2349 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_742 = torch.constant.int 12
%2350 = torch.aten.item %2348 : !torch.vtensor<[],f32> -> !torch.float
%2351 = torch.aten.item %2349 : !torch.vtensor<[],si8> -> !torch.int
%2352 = torch.aten.quantize_per_tensor %61, %2350, %2351, %int12_742 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2353 = torch.aten.int_repr %2352 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2354 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2355 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2356 = torch.aten.item %2354 : !torch.vtensor<[],f32> -> !torch.float
%2357 = torch.aten.item %2355 : !torch.vtensor<[],si8> -> !torch.int
%2358 = torch.aten._make_per_tensor_quantized_tensor %2353, %2356, %2357 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2359 = torch.aten.dequantize.self %2358 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_743 = torch.constant.int 1
%int1_744 = torch.constant.int 1
%int1_745 = torch.constant.int 1
%int1_746 = torch.constant.int 1
%int1_747 = torch.constant.int 1
%int1_748 = torch.constant.int 1
%int0_749 = torch.constant.int 0
%2360 = torch.prim.ListConstruct %int1_743, %int1_744 : (!torch.int, !torch.int) -> !torch.list<int>
%2361 = torch.prim.ListConstruct %int1_745, %int1_746 : (!torch.int, !torch.int) -> !torch.list<int>
%2362 = torch.prim.ListConstruct %int1_747, %int1_748 : (!torch.int, !torch.int) -> !torch.list<int>
%2363 = torch.prim.ListConstruct %int0_749, %int0_749 : (!torch.int, !torch.int) -> !torch.list<int>
%false_750 = torch.constant.bool false
%int1_751 = torch.constant.int 1
%2364 = torch.aten.convolution %2335, %2347, %2359, %2362, %2360, %2361, %false_750, %2363, %int1_751 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%2365 = torch.aten.relu %2364 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%2366 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2367 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_752 = torch.constant.int 12
%2368 = torch.aten.item %2366 : !torch.vtensor<[],f32> -> !torch.float
%2369 = torch.aten.item %2367 : !torch.vtensor<[],si8> -> !torch.int
%2370 = torch.aten.quantize_per_tensor %2365, %2368, %2369, %int12_752 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2371 = torch.aten.int_repr %2370 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%2372 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2374 = torch.aten.item %2372 : !torch.vtensor<[],f32> -> !torch.float
%2375 = torch.aten.item %2373 : !torch.vtensor<[],si8> -> !torch.int
%2376 = torch.aten._make_per_tensor_quantized_tensor %2371, %2374, %2375 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2377 = torch.aten.dequantize.self %2376 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%2378 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_753 = torch.constant.int 12
%2380 = torch.aten.item %2378 : !torch.vtensor<[],f32> -> !torch.float
%2381 = torch.aten.item %2379 : !torch.vtensor<[],si8> -> !torch.int
%2382 = torch.aten.quantize_per_tensor %62, %2380, %2381, %int12_753 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2383 = torch.aten.int_repr %2382 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%2384 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2385 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2386 = torch.aten.item %2384 : !torch.vtensor<[],f32> -> !torch.float
%2387 = torch.aten.item %2385 : !torch.vtensor<[],si8> -> !torch.int
%2388 = torch.aten._make_per_tensor_quantized_tensor %2383, %2386, %2387 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%2389 = torch.aten.dequantize.self %2388 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%2390 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2391 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_754 = torch.constant.int 12
%2392 = torch.aten.item %2390 : !torch.vtensor<[],f32> -> !torch.float
%2393 = torch.aten.item %2391 : !torch.vtensor<[],si8> -> !torch.int
%2394 = torch.aten.quantize_per_tensor %63, %2392, %2393, %int12_754 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2395 = torch.aten.int_repr %2394 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2396 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2397 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2398 = torch.aten.item %2396 : !torch.vtensor<[],f32> -> !torch.float
%2399 = torch.aten.item %2397 : !torch.vtensor<[],si8> -> !torch.int
%2400 = torch.aten._make_per_tensor_quantized_tensor %2395, %2398, %2399 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2401 = torch.aten.dequantize.self %2400 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int2_755 = torch.constant.int 2
%int2_756 = torch.constant.int 2
%int2_757 = torch.constant.int 2
%int2_758 = torch.constant.int 2
%int1_759 = torch.constant.int 1
%int1_760 = torch.constant.int 1
%int0_761 = torch.constant.int 0
%2402 = torch.prim.ListConstruct %int2_755, %int2_756 : (!torch.int, !torch.int) -> !torch.list<int>
%2403 = torch.prim.ListConstruct %int2_757, %int2_758 : (!torch.int, !torch.int) -> !torch.list<int>
%2404 = torch.prim.ListConstruct %int1_759, %int1_760 : (!torch.int, !torch.int) -> !torch.list<int>
%2405 = torch.prim.ListConstruct %int0_761, %int0_761 : (!torch.int, !torch.int) -> !torch.list<int>
%false_762 = torch.constant.bool false
%int1_763 = torch.constant.int 1
%2406 = torch.aten.convolution %2377, %2389, %2401, %2404, %2402, %2403, %false_762, %2405, %int1_763 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%2407 = torch.aten.relu %2406 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%2408 = torch.prim.ListConstruct %2407, %2377 : (!torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[1,64,10,10],f32>) -> !torch.list<vtensor>
%int1_764 = torch.constant.int 1
%2409 = torch.aten.cat %2408, %int1_764 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%2410 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_765 = torch.constant.int 12
%2412 = torch.aten.item %2410 : !torch.vtensor<[],f32> -> !torch.float
%2413 = torch.aten.item %2411 : !torch.vtensor<[],si8> -> !torch.int
%2414 = torch.aten.quantize_per_tensor %2409, %2412, %2413, %int12_765 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%2415 = torch.aten.int_repr %2414 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%2416 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2418 = torch.aten.item %2416 : !torch.vtensor<[],f32> -> !torch.float
%2419 = torch.aten.item %2417 : !torch.vtensor<[],si8> -> !torch.int
%2420 = torch.aten._make_per_tensor_quantized_tensor %2415, %2418, %2419 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%2421 = torch.aten.dequantize.self %2420 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%2422 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_766 = torch.constant.int 12
%2424 = torch.aten.item %2422 : !torch.vtensor<[],f32> -> !torch.float
%2425 = torch.aten.item %2423 : !torch.vtensor<[],si8> -> !torch.int
%2426 = torch.aten.quantize_per_tensor %64, %2424, %2425, %int12_766 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2427 = torch.aten.int_repr %2426 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%2428 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2430 = torch.aten.item %2428 : !torch.vtensor<[],f32> -> !torch.float
%2431 = torch.aten.item %2429 : !torch.vtensor<[],si8> -> !torch.int
%2432 = torch.aten._make_per_tensor_quantized_tensor %2427, %2430, %2431 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2433 = torch.aten.dequantize.self %2432 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%2434 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_767 = torch.constant.int 12
%2436 = torch.aten.item %2434 : !torch.vtensor<[],f32> -> !torch.float
%2437 = torch.aten.item %2435 : !torch.vtensor<[],si8> -> !torch.int
%2438 = torch.aten.quantize_per_tensor %65, %2436, %2437, %int12_767 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2439 = torch.aten.int_repr %2438 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2440 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2442 = torch.aten.item %2440 : !torch.vtensor<[],f32> -> !torch.float
%2443 = torch.aten.item %2441 : !torch.vtensor<[],si8> -> !torch.int
%2444 = torch.aten._make_per_tensor_quantized_tensor %2439, %2442, %2443 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2445 = torch.aten.dequantize.self %2444 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_768 = torch.constant.int 1
%int1_769 = torch.constant.int 1
%int1_770 = torch.constant.int 1
%int1_771 = torch.constant.int 1
%int1_772 = torch.constant.int 1
%int1_773 = torch.constant.int 1
%int0_774 = torch.constant.int 0
%2446 = torch.prim.ListConstruct %int1_768, %int1_769 : (!torch.int, !torch.int) -> !torch.list<int>
%2447 = torch.prim.ListConstruct %int1_770, %int1_771 : (!torch.int, !torch.int) -> !torch.list<int>
%2448 = torch.prim.ListConstruct %int1_772, %int1_773 : (!torch.int, !torch.int) -> !torch.list<int>
%2449 = torch.prim.ListConstruct %int0_774, %int0_774 : (!torch.int, !torch.int) -> !torch.list<int>
%false_775 = torch.constant.bool false
%int1_776 = torch.constant.int 1
%2450 = torch.aten.convolution %2421, %2433, %2445, %2448, %2446, %2447, %false_775, %2449, %int1_776 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%2451 = torch.aten.relu %2450 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%2452 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_777 = torch.constant.int 12
%2454 = torch.aten.item %2452 : !torch.vtensor<[],f32> -> !torch.float
%2455 = torch.aten.item %2453 : !torch.vtensor<[],si8> -> !torch.int
%2456 = torch.aten.quantize_per_tensor %2451, %2454, %2455, %int12_777 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2457 = torch.aten.int_repr %2456 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%2458 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2460 = torch.aten.item %2458 : !torch.vtensor<[],f32> -> !torch.float
%2461 = torch.aten.item %2459 : !torch.vtensor<[],si8> -> !torch.int
%2462 = torch.aten._make_per_tensor_quantized_tensor %2457, %2460, %2461 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%2463 = torch.aten.dequantize.self %2462 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%2464 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%2465 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%2466 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_778 = torch.constant.int 0
%int0_779 = torch.constant.int 0
%int0_780 = torch.constant.int 0
%2467 = torch.aten.select.int %2466, %int0_778, %int0_780 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2468 = torch.aten.item %2467 : !torch.vtensor<[1],si64> -> !torch.int
%2469 = torch.aten.lt.int %2468, %int0_778 : !torch.int, !torch.int -> !torch.bool
%2470 = torch.aten.Int.bool %2469 : !torch.bool -> !torch.int
%2471 = torch.aten.mul.int %2470, %int0_779 : !torch.int, !torch.int -> !torch.int
%2472 = torch.aten.add.int %2468, %2471 : !torch.int, !torch.int -> !torch.int
%2473 = torch.prim.ListConstruct %2472 : (!torch.int) -> !torch.list<int>
%false_781 = torch.constant.bool false
%none_782 = torch.constant.none
%2474 = torch.aten.tensor %2473, %none_782, %none_782, %false_781 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_783, %indices_784 = torch.aten.sort %2474, %int0_778, %false_781 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_785 = torch.constant.int 0
%2475 = torch.aten.select.int %values_783, %int0_778, %int0_785 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2476 = torch.aten.item %2475 : !torch.vtensor<[1],si64> -> !torch.int
%2477 = torch.aten.unsqueeze %2464, %2476 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2478 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_786 = torch.constant.int 0
%int0_787 = torch.constant.int 0
%int0_788 = torch.constant.int 0
%2479 = torch.aten.select.int %2478, %int0_786, %int0_788 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2480 = torch.aten.item %2479 : !torch.vtensor<[1],si64> -> !torch.int
%2481 = torch.aten.lt.int %2480, %int0_786 : !torch.int, !torch.int -> !torch.bool
%2482 = torch.aten.Int.bool %2481 : !torch.bool -> !torch.int
%2483 = torch.aten.mul.int %2482, %int0_787 : !torch.int, !torch.int -> !torch.int
%2484 = torch.aten.add.int %2480, %2483 : !torch.int, !torch.int -> !torch.int
%2485 = torch.prim.ListConstruct %2484 : (!torch.int) -> !torch.list<int>
%false_789 = torch.constant.bool false
%none_790 = torch.constant.none
%2486 = torch.aten.tensor %2485, %none_790, %none_790, %false_789 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_791, %indices_792 = torch.aten.sort %2486, %int0_786, %false_789 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_793 = torch.constant.int 0
%2487 = torch.aten.select.int %values_791, %int0_786, %int0_793 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2488 = torch.aten.item %2487 : !torch.vtensor<[1],si64> -> !torch.int
%2489 = torch.aten.unsqueeze %2465, %2488 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2490 = torch.prim.ListConstruct %2477, %2489 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_794 = torch.constant.int 0
%2491 = torch.aten.cat %2490, %int0_794 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2492 = torch.aten._shape_as_tensor %2463 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[4],si64>
%2493 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2494 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2495 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_795 = torch.constant.none
%int1_796 = torch.constant.int 1
%2496 = torch.prim.ListConstruct %int1_796 : (!torch.int) -> !torch.list<int>
%2497 = torch.aten.ones %2496, %none_795, %none_795, %none_795, %none_795 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_797 = torch.constant.int 0
%int0_798 = torch.constant.int 0
%2498 = torch.prim.NumToTensor.Scalar %int0_798 : !torch.int -> !torch.vtensor<[1],si64>
%2499 = torch.aten.index_select %2494, %int0_797, %2498 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2500 = torch.aten.item %2499 : !torch.vtensor<[1],si64> -> !torch.int
%2501 = torch.aten.index_select %2495, %int0_797, %2498 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2502 = torch.aten.item %2501 : !torch.vtensor<[1],si64> -> !torch.int
%2503 = torch.aten.index_select %2493, %int0_797, %2498 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2504 = torch.aten.item %2503 : !torch.vtensor<[1],si64> -> !torch.int
%2505 = torch.aten.index_select %2497, %int0_797, %2498 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2506 = torch.aten.item %2505 : !torch.vtensor<[1],si64> -> !torch.int
%2507 = torch.aten.slice.Tensor %2492, %2504, %2500, %2502, %2506 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_799 = torch.constant.int 4
%none_800 = torch.constant.none
%false_801 = torch.constant.bool false
%2508 = torch.aten.to.dtype %2491, %int4_799, %false_801, %false_801, %none_800 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%2509 = torch.prim.ListConstruct %2507, %2508 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_802 = torch.constant.int 0
%2510 = torch.aten.cat %2509, %int0_802 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%2511 = torch.operator "onnx.Resize"(%2463, %none, %none, %2510) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%2512 = torch.prim.ListConstruct %2511, %2318 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,20,20],f32>) -> !torch.list<vtensor>
%int1_803 = torch.constant.int 1
%2513 = torch.aten.cat %2512, %int1_803 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%2514 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2515 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_804 = torch.constant.int 12
%2516 = torch.aten.item %2514 : !torch.vtensor<[],f32> -> !torch.float
%2517 = torch.aten.item %2515 : !torch.vtensor<[],si8> -> !torch.int
%2518 = torch.aten.quantize_per_tensor %2513, %2516, %2517, %int12_804 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%2519 = torch.aten.int_repr %2518 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%2520 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2522 = torch.aten.item %2520 : !torch.vtensor<[],f32> -> !torch.float
%2523 = torch.aten.item %2521 : !torch.vtensor<[],si8> -> !torch.int
%2524 = torch.aten._make_per_tensor_quantized_tensor %2519, %2522, %2523 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%2525 = torch.aten.dequantize.self %2524 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%2526 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2527 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_805 = torch.constant.int 12
%2528 = torch.aten.item %2526 : !torch.vtensor<[],f32> -> !torch.float
%2529 = torch.aten.item %2527 : !torch.vtensor<[],si8> -> !torch.int
%2530 = torch.aten.quantize_per_tensor %66, %2528, %2529, %int12_805 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2531 = torch.aten.int_repr %2530 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%2532 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2533 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2534 = torch.aten.item %2532 : !torch.vtensor<[],f32> -> !torch.float
%2535 = torch.aten.item %2533 : !torch.vtensor<[],si8> -> !torch.int
%2536 = torch.aten._make_per_tensor_quantized_tensor %2531, %2534, %2535 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2537 = torch.aten.dequantize.self %2536 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%2538 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2539 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_806 = torch.constant.int 12
%2540 = torch.aten.item %2538 : !torch.vtensor<[],f32> -> !torch.float
%2541 = torch.aten.item %2539 : !torch.vtensor<[],si8> -> !torch.int
%2542 = torch.aten.quantize_per_tensor %67, %2540, %2541, %int12_806 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2543 = torch.aten.int_repr %2542 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2544 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2545 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2546 = torch.aten.item %2544 : !torch.vtensor<[],f32> -> !torch.float
%2547 = torch.aten.item %2545 : !torch.vtensor<[],si8> -> !torch.int
%2548 = torch.aten._make_per_tensor_quantized_tensor %2543, %2546, %2547 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2549 = torch.aten.dequantize.self %2548 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_807 = torch.constant.int 1
%int1_808 = torch.constant.int 1
%int1_809 = torch.constant.int 1
%int1_810 = torch.constant.int 1
%int1_811 = torch.constant.int 1
%int1_812 = torch.constant.int 1
%int0_813 = torch.constant.int 0
%2550 = torch.prim.ListConstruct %int1_807, %int1_808 : (!torch.int, !torch.int) -> !torch.list<int>
%2551 = torch.prim.ListConstruct %int1_809, %int1_810 : (!torch.int, !torch.int) -> !torch.list<int>
%2552 = torch.prim.ListConstruct %int1_811, %int1_812 : (!torch.int, !torch.int) -> !torch.list<int>
%2553 = torch.prim.ListConstruct %int0_813, %int0_813 : (!torch.int, !torch.int) -> !torch.list<int>
%false_814 = torch.constant.bool false
%int1_815 = torch.constant.int 1
%2554 = torch.aten.convolution %2525, %2537, %2549, %2552, %2550, %2551, %false_814, %2553, %int1_815 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,20,20],f32>
%2555 = torch.aten.relu %2554 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[1,64,20,20],f32>
%2556 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2557 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_816 = torch.constant.int 12
%2558 = torch.aten.item %2556 : !torch.vtensor<[],f32> -> !torch.float
%2559 = torch.aten.item %2557 : !torch.vtensor<[],si8> -> !torch.int
%2560 = torch.aten.quantize_per_tensor %2555, %2558, %2559, %int12_816 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2561 = torch.aten.int_repr %2560 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%2562 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2563 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2564 = torch.aten.item %2562 : !torch.vtensor<[],f32> -> !torch.float
%2565 = torch.aten.item %2563 : !torch.vtensor<[],si8> -> !torch.int
%2566 = torch.aten._make_per_tensor_quantized_tensor %2561, %2564, %2565 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%2567 = torch.aten.dequantize.self %2566 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%2568 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%2569 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%2570 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_817 = torch.constant.int 0
%int0_818 = torch.constant.int 0
%int0_819 = torch.constant.int 0
%2571 = torch.aten.select.int %2570, %int0_817, %int0_819 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2572 = torch.aten.item %2571 : !torch.vtensor<[1],si64> -> !torch.int
%2573 = torch.aten.lt.int %2572, %int0_817 : !torch.int, !torch.int -> !torch.bool
%2574 = torch.aten.Int.bool %2573 : !torch.bool -> !torch.int
%2575 = torch.aten.mul.int %2574, %int0_818 : !torch.int, !torch.int -> !torch.int
%2576 = torch.aten.add.int %2572, %2575 : !torch.int, !torch.int -> !torch.int
%2577 = torch.prim.ListConstruct %2576 : (!torch.int) -> !torch.list<int>
%false_820 = torch.constant.bool false
%none_821 = torch.constant.none
%2578 = torch.aten.tensor %2577, %none_821, %none_821, %false_820 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_822, %indices_823 = torch.aten.sort %2578, %int0_817, %false_820 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_824 = torch.constant.int 0
%2579 = torch.aten.select.int %values_822, %int0_817, %int0_824 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2580 = torch.aten.item %2579 : !torch.vtensor<[1],si64> -> !torch.int
%2581 = torch.aten.unsqueeze %2568, %2580 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2582 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_825 = torch.constant.int 0
%int0_826 = torch.constant.int 0
%int0_827 = torch.constant.int 0
%2583 = torch.aten.select.int %2582, %int0_825, %int0_827 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2584 = torch.aten.item %2583 : !torch.vtensor<[1],si64> -> !torch.int
%2585 = torch.aten.lt.int %2584, %int0_825 : !torch.int, !torch.int -> !torch.bool
%2586 = torch.aten.Int.bool %2585 : !torch.bool -> !torch.int
%2587 = torch.aten.mul.int %2586, %int0_826 : !torch.int, !torch.int -> !torch.int
%2588 = torch.aten.add.int %2584, %2587 : !torch.int, !torch.int -> !torch.int
%2589 = torch.prim.ListConstruct %2588 : (!torch.int) -> !torch.list<int>
%false_828 = torch.constant.bool false
%none_829 = torch.constant.none
%2590 = torch.aten.tensor %2589, %none_829, %none_829, %false_828 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_830, %indices_831 = torch.aten.sort %2590, %int0_825, %false_828 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_832 = torch.constant.int 0
%2591 = torch.aten.select.int %values_830, %int0_825, %int0_832 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2592 = torch.aten.item %2591 : !torch.vtensor<[1],si64> -> !torch.int
%2593 = torch.aten.unsqueeze %2569, %2592 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2594 = torch.prim.ListConstruct %2581, %2593 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_833 = torch.constant.int 0
%2595 = torch.aten.cat %2594, %int0_833 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2596 = torch.aten._shape_as_tensor %2567 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[4],si64>
%2597 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2598 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2599 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_834 = torch.constant.none
%int1_835 = torch.constant.int 1
%2600 = torch.prim.ListConstruct %int1_835 : (!torch.int) -> !torch.list<int>
%2601 = torch.aten.ones %2600, %none_834, %none_834, %none_834, %none_834 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_836 = torch.constant.int 0
%int0_837 = torch.constant.int 0
%2602 = torch.prim.NumToTensor.Scalar %int0_837 : !torch.int -> !torch.vtensor<[1],si64>
%2603 = torch.aten.index_select %2598, %int0_836, %2602 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2604 = torch.aten.item %2603 : !torch.vtensor<[1],si64> -> !torch.int
%2605 = torch.aten.index_select %2599, %int0_836, %2602 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2606 = torch.aten.item %2605 : !torch.vtensor<[1],si64> -> !torch.int
%2607 = torch.aten.index_select %2597, %int0_836, %2602 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2608 = torch.aten.item %2607 : !torch.vtensor<[1],si64> -> !torch.int
%2609 = torch.aten.index_select %2601, %int0_836, %2602 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2610 = torch.aten.item %2609 : !torch.vtensor<[1],si64> -> !torch.int
%2611 = torch.aten.slice.Tensor %2596, %2608, %2604, %2606, %2610 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_838 = torch.constant.int 4
%none_839 = torch.constant.none
%false_840 = torch.constant.bool false
%2612 = torch.aten.to.dtype %2595, %int4_838, %false_840, %false_840, %none_839 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%2613 = torch.prim.ListConstruct %2611, %2612 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_841 = torch.constant.int 0
%2614 = torch.aten.cat %2613, %int0_841 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%2615 = torch.operator "onnx.Resize"(%2567, %none, %none, %2614) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%2616 = torch.prim.ListConstruct %2615, %2259 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,40,40],f32>) -> !torch.list<vtensor>
%int1_842 = torch.constant.int 1
%2617 = torch.aten.cat %2616, %int1_842 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%2618 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2619 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_843 = torch.constant.int 12
%2620 = torch.aten.item %2618 : !torch.vtensor<[],f32> -> !torch.float
%2621 = torch.aten.item %2619 : !torch.vtensor<[],si8> -> !torch.int
%2622 = torch.aten.quantize_per_tensor %2617, %2620, %2621, %int12_843 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%2623 = torch.aten.int_repr %2622 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%2624 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2625 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2626 = torch.aten.item %2624 : !torch.vtensor<[],f32> -> !torch.float
%2627 = torch.aten.item %2625 : !torch.vtensor<[],si8> -> !torch.int
%2628 = torch.aten._make_per_tensor_quantized_tensor %2623, %2626, %2627 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%2629 = torch.aten.dequantize.self %2628 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%2630 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2631 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_844 = torch.constant.int 12
%2632 = torch.aten.item %2630 : !torch.vtensor<[],f32> -> !torch.float
%2633 = torch.aten.item %2631 : !torch.vtensor<[],si8> -> !torch.int
%2634 = torch.aten.quantize_per_tensor %68, %2632, %2633, %int12_844 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2635 = torch.aten.int_repr %2634 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%2636 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2637 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2638 = torch.aten.item %2636 : !torch.vtensor<[],f32> -> !torch.float
%2639 = torch.aten.item %2637 : !torch.vtensor<[],si8> -> !torch.int
%2640 = torch.aten._make_per_tensor_quantized_tensor %2635, %2638, %2639 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%2641 = torch.aten.dequantize.self %2640 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%2642 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2643 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_845 = torch.constant.int 12
%2644 = torch.aten.item %2642 : !torch.vtensor<[],f32> -> !torch.float
%2645 = torch.aten.item %2643 : !torch.vtensor<[],si8> -> !torch.int
%2646 = torch.aten.quantize_per_tensor %69, %2644, %2645, %int12_845 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2647 = torch.aten.int_repr %2646 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%2648 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2649 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2650 = torch.aten.item %2648 : !torch.vtensor<[],f32> -> !torch.float
%2651 = torch.aten.item %2649 : !torch.vtensor<[],si8> -> !torch.int
%2652 = torch.aten._make_per_tensor_quantized_tensor %2647, %2650, %2651 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%2653 = torch.aten.dequantize.self %2652 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_846 = torch.constant.int 1
%int1_847 = torch.constant.int 1
%int1_848 = torch.constant.int 1
%int1_849 = torch.constant.int 1
%int1_850 = torch.constant.int 1
%int1_851 = torch.constant.int 1
%int0_852 = torch.constant.int 0
%2654 = torch.prim.ListConstruct %int1_846, %int1_847 : (!torch.int, !torch.int) -> !torch.list<int>
%2655 = torch.prim.ListConstruct %int1_848, %int1_849 : (!torch.int, !torch.int) -> !torch.list<int>
%2656 = torch.prim.ListConstruct %int1_850, %int1_851 : (!torch.int, !torch.int) -> !torch.list<int>
%2657 = torch.prim.ListConstruct %int0_852, %int0_852 : (!torch.int, !torch.int) -> !torch.list<int>
%false_853 = torch.constant.bool false
%int1_854 = torch.constant.int 1
%2658 = torch.aten.convolution %2629, %2641, %2653, %2656, %2654, %2655, %false_853, %2657, %int1_854 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,40,40],f32>
%2659 = torch.aten.relu %2658 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[1,64,40,40],f32>
%2660 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2661 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_855 = torch.constant.int 12
%2662 = torch.aten.item %2660 : !torch.vtensor<[],f32> -> !torch.float
%2663 = torch.aten.item %2661 : !torch.vtensor<[],si8> -> !torch.int
%2664 = torch.aten.quantize_per_tensor %2659, %2662, %2663, %int12_855 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2665 = torch.aten.int_repr %2664 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%2666 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2667 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2668 = torch.aten.item %2666 : !torch.vtensor<[],f32> -> !torch.float
%2669 = torch.aten.item %2667 : !torch.vtensor<[],si8> -> !torch.int
%2670 = torch.aten._make_per_tensor_quantized_tensor %2665, %2668, %2669 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%2671 = torch.aten.dequantize.self %2670 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%2672 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%2673 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%2674 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_856 = torch.constant.int 0
%int0_857 = torch.constant.int 0
%int0_858 = torch.constant.int 0
%2675 = torch.aten.select.int %2674, %int0_856, %int0_858 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2676 = torch.aten.item %2675 : !torch.vtensor<[1],si64> -> !torch.int
%2677 = torch.aten.lt.int %2676, %int0_856 : !torch.int, !torch.int -> !torch.bool
%2678 = torch.aten.Int.bool %2677 : !torch.bool -> !torch.int
%2679 = torch.aten.mul.int %2678, %int0_857 : !torch.int, !torch.int -> !torch.int
%2680 = torch.aten.add.int %2676, %2679 : !torch.int, !torch.int -> !torch.int
%2681 = torch.prim.ListConstruct %2680 : (!torch.int) -> !torch.list<int>
%false_859 = torch.constant.bool false
%none_860 = torch.constant.none
%2682 = torch.aten.tensor %2681, %none_860, %none_860, %false_859 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_861, %indices_862 = torch.aten.sort %2682, %int0_856, %false_859 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_863 = torch.constant.int 0
%2683 = torch.aten.select.int %values_861, %int0_856, %int0_863 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2684 = torch.aten.item %2683 : !torch.vtensor<[1],si64> -> !torch.int
%2685 = torch.aten.unsqueeze %2672, %2684 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2686 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_864 = torch.constant.int 0
%int0_865 = torch.constant.int 0
%int0_866 = torch.constant.int 0
%2687 = torch.aten.select.int %2686, %int0_864, %int0_866 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2688 = torch.aten.item %2687 : !torch.vtensor<[1],si64> -> !torch.int
%2689 = torch.aten.lt.int %2688, %int0_864 : !torch.int, !torch.int -> !torch.bool
%2690 = torch.aten.Int.bool %2689 : !torch.bool -> !torch.int
%2691 = torch.aten.mul.int %2690, %int0_865 : !torch.int, !torch.int -> !torch.int
%2692 = torch.aten.add.int %2688, %2691 : !torch.int, !torch.int -> !torch.int
%2693 = torch.prim.ListConstruct %2692 : (!torch.int) -> !torch.list<int>
%false_867 = torch.constant.bool false
%none_868 = torch.constant.none
%2694 = torch.aten.tensor %2693, %none_868, %none_868, %false_867 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_869, %indices_870 = torch.aten.sort %2694, %int0_864, %false_867 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_871 = torch.constant.int 0
%2695 = torch.aten.select.int %values_869, %int0_864, %int0_871 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%2696 = torch.aten.item %2695 : !torch.vtensor<[1],si64> -> !torch.int
%2697 = torch.aten.unsqueeze %2673, %2696 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%2698 = torch.prim.ListConstruct %2685, %2697 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_872 = torch.constant.int 0
%2699 = torch.aten.cat %2698, %int0_872 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%2700 = torch.aten._shape_as_tensor %2671 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[4],si64>
%2701 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2702 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%2703 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_873 = torch.constant.none
%int1_874 = torch.constant.int 1
%2704 = torch.prim.ListConstruct %int1_874 : (!torch.int) -> !torch.list<int>
%2705 = torch.aten.ones %2704, %none_873, %none_873, %none_873, %none_873 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_875 = torch.constant.int 0
%int0_876 = torch.constant.int 0
%2706 = torch.prim.NumToTensor.Scalar %int0_876 : !torch.int -> !torch.vtensor<[1],si64>
%2707 = torch.aten.index_select %2702, %int0_875, %2706 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2708 = torch.aten.item %2707 : !torch.vtensor<[1],si64> -> !torch.int
%2709 = torch.aten.index_select %2703, %int0_875, %2706 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2710 = torch.aten.item %2709 : !torch.vtensor<[1],si64> -> !torch.int
%2711 = torch.aten.index_select %2701, %int0_875, %2706 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2712 = torch.aten.item %2711 : !torch.vtensor<[1],si64> -> !torch.int
%2713 = torch.aten.index_select %2705, %int0_875, %2706 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%2714 = torch.aten.item %2713 : !torch.vtensor<[1],si64> -> !torch.int
%2715 = torch.aten.slice.Tensor %2700, %2712, %2708, %2710, %2714 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_877 = torch.constant.int 4
%none_878 = torch.constant.none
%false_879 = torch.constant.bool false
%2716 = torch.aten.to.dtype %2699, %int4_877, %false_879, %false_879, %none_878 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%2717 = torch.prim.ListConstruct %2715, %2716 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_880 = torch.constant.int 0
%2718 = torch.aten.cat %2717, %int0_880 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%2719 = torch.operator "onnx.Resize"(%2671, %none, %none, %2718) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%2720 = torch.prim.ListConstruct %2719, %2200 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,80,80],f32>) -> !torch.list<vtensor>
%int1_881 = torch.constant.int 1
%2721 = torch.aten.cat %2720, %int1_881 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%2722 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_882 = torch.constant.int 12
%2724 = torch.aten.item %2722 : !torch.vtensor<[],f32> -> !torch.float
%2725 = torch.aten.item %2723 : !torch.vtensor<[],si8> -> !torch.int
%2726 = torch.aten.quantize_per_tensor %2721, %2724, %2725, %int12_882 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%2727 = torch.aten.int_repr %2726 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%2728 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2730 = torch.aten.item %2728 : !torch.vtensor<[],f32> -> !torch.float
%2731 = torch.aten.item %2729 : !torch.vtensor<[],si8> -> !torch.int
%2732 = torch.aten._make_per_tensor_quantized_tensor %2727, %2730, %2731 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%2733 = torch.aten.dequantize.self %2732 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%2734 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_883 = torch.constant.int 12
%2736 = torch.aten.item %2734 : !torch.vtensor<[],f32> -> !torch.float
%2737 = torch.aten.item %2735 : !torch.vtensor<[],si8> -> !torch.int
%2738 = torch.aten.quantize_per_tensor %70, %2736, %2737, %int12_883 : !torch.vtensor<[256,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%2739 = torch.aten.int_repr %2738 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],si8>
%2740 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2742 = torch.aten.item %2740 : !torch.vtensor<[],f32> -> !torch.float
%2743 = torch.aten.item %2741 : !torch.vtensor<[],si8> -> !torch.int
%2744 = torch.aten._make_per_tensor_quantized_tensor %2739, %2742, %2743 : !torch.vtensor<[256,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%2745 = torch.aten.dequantize.self %2744 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],f32>
%2746 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_884 = torch.constant.int 12
%2748 = torch.aten.item %2746 : !torch.vtensor<[],f32> -> !torch.float
%2749 = torch.aten.item %2747 : !torch.vtensor<[],si8> -> !torch.int
%2750 = torch.aten.quantize_per_tensor %71, %2748, %2749, %int12_884 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2751 = torch.aten.int_repr %2750 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2752 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2754 = torch.aten.item %2752 : !torch.vtensor<[],f32> -> !torch.float
%2755 = torch.aten.item %2753 : !torch.vtensor<[],si8> -> !torch.int
%2756 = torch.aten._make_per_tensor_quantized_tensor %2751, %2754, %2755 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2757 = torch.aten.dequantize.self %2756 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_885 = torch.constant.int 1
%int1_886 = torch.constant.int 1
%int1_887 = torch.constant.int 1
%int1_888 = torch.constant.int 1
%int1_889 = torch.constant.int 1
%int1_890 = torch.constant.int 1
%int0_891 = torch.constant.int 0
%2758 = torch.prim.ListConstruct %int1_885, %int1_886 : (!torch.int, !torch.int) -> !torch.list<int>
%2759 = torch.prim.ListConstruct %int1_887, %int1_888 : (!torch.int, !torch.int) -> !torch.list<int>
%2760 = torch.prim.ListConstruct %int1_889, %int1_890 : (!torch.int, !torch.int) -> !torch.list<int>
%2761 = torch.prim.ListConstruct %int0_891, %int0_891 : (!torch.int, !torch.int) -> !torch.list<int>
%false_892 = torch.constant.bool false
%int1_893 = torch.constant.int 1
%2762 = torch.aten.convolution %2733, %2745, %2757, %2760, %2758, %2759, %false_892, %2761, %int1_893 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[256,128,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,80,80],f32>
%2763 = torch.aten.relu %2762 : !torch.vtensor<[1,256,80,80],f32> -> !torch.vtensor<[1,256,80,80],f32>
%2764 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2765 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_894 = torch.constant.int 12
%2766 = torch.aten.item %2764 : !torch.vtensor<[],f32> -> !torch.float
%2767 = torch.aten.item %2765 : !torch.vtensor<[],si8> -> !torch.int
%2768 = torch.aten.quantize_per_tensor %2763, %2766, %2767, %int12_894 : !torch.vtensor<[1,256,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2769 = torch.aten.int_repr %2768 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],si8>
%2770 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2772 = torch.aten.item %2770 : !torch.vtensor<[],f32> -> !torch.float
%2773 = torch.aten.item %2771 : !torch.vtensor<[],si8> -> !torch.int
%2774 = torch.aten._make_per_tensor_quantized_tensor %2769, %2772, %2773 : !torch.vtensor<[1,256,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2775 = torch.aten.dequantize.self %2774 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],f32>
%int1_895 = torch.constant.int 1
%2776 = torch.aten.add.Tensor %2775, %2158, %int1_895 : !torch.vtensor<[1,256,80,80],f32>, !torch.vtensor<[1,256,80,80],f32>, !torch.int -> !torch.vtensor<[1,256,80,80],f32>
%2777 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2778 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_896 = torch.constant.int 12
%2779 = torch.aten.item %2777 : !torch.vtensor<[],f32> -> !torch.float
%2780 = torch.aten.item %2778 : !torch.vtensor<[],si8> -> !torch.int
%2781 = torch.aten.quantize_per_tensor %2776, %2779, %2780, %int12_896 : !torch.vtensor<[1,256,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2782 = torch.aten.int_repr %2781 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],si8>
%2783 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2784 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2785 = torch.aten.item %2783 : !torch.vtensor<[],f32> -> !torch.float
%2786 = torch.aten.item %2784 : !torch.vtensor<[],si8> -> !torch.int
%2787 = torch.aten._make_per_tensor_quantized_tensor %2782, %2785, %2786 : !torch.vtensor<[1,256,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,80,80],!torch.qint8>
%2788 = torch.aten.dequantize.self %2787 : !torch.vtensor<[1,256,80,80],!torch.qint8> -> !torch.vtensor<[1,256,80,80],f32>
%int2_897 = torch.constant.int 2
%int2_898 = torch.constant.int 2
%2789 = torch.prim.ListConstruct %int2_897, %int2_898 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_899 = torch.constant.int 0
%int0_900 = torch.constant.int 0
%2790 = torch.prim.ListConstruct %int0_899, %int0_900 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_901 = torch.constant.int 2
%int2_902 = torch.constant.int 2
%2791 = torch.prim.ListConstruct %int2_901, %int2_902 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_903 = torch.constant.int 1
%int1_904 = torch.constant.int 1
%2792 = torch.prim.ListConstruct %int1_903, %int1_904 : (!torch.int, !torch.int) -> !torch.list<int>
%true_905 = torch.constant.bool true
%2793 = torch.aten.max_pool2d %2788, %2789, %2791, %2790, %2792, %true_905 : !torch.vtensor<[1,256,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,256,40,40],f32>
%2794 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_906 = torch.constant.int 12
%2796 = torch.aten.item %2794 : !torch.vtensor<[],f32> -> !torch.float
%2797 = torch.aten.item %2795 : !torch.vtensor<[],si8> -> !torch.int
%2798 = torch.aten.quantize_per_tensor %2793, %2796, %2797, %int12_906 : !torch.vtensor<[1,256,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%2799 = torch.aten.int_repr %2798 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],si8>
%2800 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2802 = torch.aten.item %2800 : !torch.vtensor<[],f32> -> !torch.float
%2803 = torch.aten.item %2801 : !torch.vtensor<[],si8> -> !torch.int
%2804 = torch.aten._make_per_tensor_quantized_tensor %2799, %2802, %2803 : !torch.vtensor<[1,256,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%2805 = torch.aten.dequantize.self %2804 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],f32>
%2806 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2807 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_907 = torch.constant.int 12
%2808 = torch.aten.item %2806 : !torch.vtensor<[],f32> -> !torch.float
%2809 = torch.aten.item %2807 : !torch.vtensor<[],si8> -> !torch.int
%2810 = torch.aten.quantize_per_tensor %72, %2808, %2809, %int12_907 : !torch.vtensor<[512,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%2811 = torch.aten.int_repr %2810 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],si8>
%2812 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2813 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2814 = torch.aten.item %2812 : !torch.vtensor<[],f32> -> !torch.float
%2815 = torch.aten.item %2813 : !torch.vtensor<[],si8> -> !torch.int
%2816 = torch.aten._make_per_tensor_quantized_tensor %2811, %2814, %2815 : !torch.vtensor<[512,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%2817 = torch.aten.dequantize.self %2816 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],f32>
%2818 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2819 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_908 = torch.constant.int 12
%2820 = torch.aten.item %2818 : !torch.vtensor<[],f32> -> !torch.float
%2821 = torch.aten.item %2819 : !torch.vtensor<[],si8> -> !torch.int
%2822 = torch.aten.quantize_per_tensor %73, %2820, %2821, %int12_908 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2823 = torch.aten.int_repr %2822 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2824 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2825 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2826 = torch.aten.item %2824 : !torch.vtensor<[],f32> -> !torch.float
%2827 = torch.aten.item %2825 : !torch.vtensor<[],si8> -> !torch.int
%2828 = torch.aten._make_per_tensor_quantized_tensor %2823, %2826, %2827 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2829 = torch.aten.dequantize.self %2828 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_909 = torch.constant.int 1
%int1_910 = torch.constant.int 1
%int1_911 = torch.constant.int 1
%int1_912 = torch.constant.int 1
%int1_913 = torch.constant.int 1
%int1_914 = torch.constant.int 1
%int0_915 = torch.constant.int 0
%2830 = torch.prim.ListConstruct %int1_909, %int1_910 : (!torch.int, !torch.int) -> !torch.list<int>
%2831 = torch.prim.ListConstruct %int1_911, %int1_912 : (!torch.int, !torch.int) -> !torch.list<int>
%2832 = torch.prim.ListConstruct %int1_913, %int1_914 : (!torch.int, !torch.int) -> !torch.list<int>
%2833 = torch.prim.ListConstruct %int0_915, %int0_915 : (!torch.int, !torch.int) -> !torch.list<int>
%false_916 = torch.constant.bool false
%int1_917 = torch.constant.int 1
%2834 = torch.aten.convolution %2805, %2817, %2829, %2832, %2830, %2831, %false_916, %2833, %int1_917 : !torch.vtensor<[1,256,40,40],f32>, !torch.vtensor<[512,256,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,40,40],f32>
%2835 = torch.aten.relu %2834 : !torch.vtensor<[1,512,40,40],f32> -> !torch.vtensor<[1,512,40,40],f32>
%2836 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2837 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_918 = torch.constant.int 12
%2838 = torch.aten.item %2836 : !torch.vtensor<[],f32> -> !torch.float
%2839 = torch.aten.item %2837 : !torch.vtensor<[],si8> -> !torch.int
%2840 = torch.aten.quantize_per_tensor %2835, %2838, %2839, %int12_918 : !torch.vtensor<[1,512,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%2841 = torch.aten.int_repr %2840 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],si8>
%2842 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2843 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2844 = torch.aten.item %2842 : !torch.vtensor<[],f32> -> !torch.float
%2845 = torch.aten.item %2843 : !torch.vtensor<[],si8> -> !torch.int
%2846 = torch.aten._make_per_tensor_quantized_tensor %2841, %2844, %2845 : !torch.vtensor<[1,512,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%2847 = torch.aten.dequantize.self %2846 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],f32>
%2848 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2849 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_919 = torch.constant.int 12
%2850 = torch.aten.item %2848 : !torch.vtensor<[],f32> -> !torch.float
%2851 = torch.aten.item %2849 : !torch.vtensor<[],si8> -> !torch.int
%2852 = torch.aten.quantize_per_tensor %74, %2850, %2851, %int12_919 : !torch.vtensor<[128,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,3,3],!torch.qint8>
%2853 = torch.aten.int_repr %2852 : !torch.vtensor<[128,512,3,3],!torch.qint8> -> !torch.vtensor<[128,512,3,3],si8>
%2854 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2855 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2856 = torch.aten.item %2854 : !torch.vtensor<[],f32> -> !torch.float
%2857 = torch.aten.item %2855 : !torch.vtensor<[],si8> -> !torch.int
%2858 = torch.aten._make_per_tensor_quantized_tensor %2853, %2856, %2857 : !torch.vtensor<[128,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,3,3],!torch.qint8>
%2859 = torch.aten.dequantize.self %2858 : !torch.vtensor<[128,512,3,3],!torch.qint8> -> !torch.vtensor<[128,512,3,3],f32>
%2860 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2861 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_920 = torch.constant.int 12
%2862 = torch.aten.item %2860 : !torch.vtensor<[],f32> -> !torch.float
%2863 = torch.aten.item %2861 : !torch.vtensor<[],si8> -> !torch.int
%2864 = torch.aten.quantize_per_tensor %75, %2862, %2863, %int12_920 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2865 = torch.aten.int_repr %2864 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%2866 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2867 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2868 = torch.aten.item %2866 : !torch.vtensor<[],f32> -> !torch.float
%2869 = torch.aten.item %2867 : !torch.vtensor<[],si8> -> !torch.int
%2870 = torch.aten._make_per_tensor_quantized_tensor %2865, %2868, %2869 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2871 = torch.aten.dequantize.self %2870 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_921 = torch.constant.int 1
%int1_922 = torch.constant.int 1
%int1_923 = torch.constant.int 1
%int1_924 = torch.constant.int 1
%int1_925 = torch.constant.int 1
%int1_926 = torch.constant.int 1
%int0_927 = torch.constant.int 0
%2872 = torch.prim.ListConstruct %int1_921, %int1_922 : (!torch.int, !torch.int) -> !torch.list<int>
%2873 = torch.prim.ListConstruct %int1_923, %int1_924 : (!torch.int, !torch.int) -> !torch.list<int>
%2874 = torch.prim.ListConstruct %int1_925, %int1_926 : (!torch.int, !torch.int) -> !torch.list<int>
%2875 = torch.prim.ListConstruct %int0_927, %int0_927 : (!torch.int, !torch.int) -> !torch.list<int>
%false_928 = torch.constant.bool false
%int1_929 = torch.constant.int 1
%2876 = torch.aten.convolution %2847, %2859, %2871, %2874, %2872, %2873, %false_928, %2875, %int1_929 : !torch.vtensor<[1,512,40,40],f32>, !torch.vtensor<[128,512,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,40,40],f32>
%2877 = torch.aten.relu %2876 : !torch.vtensor<[1,128,40,40],f32> -> !torch.vtensor<[1,128,40,40],f32>
%2878 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2879 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_930 = torch.constant.int 12
%2880 = torch.aten.item %2878 : !torch.vtensor<[],f32> -> !torch.float
%2881 = torch.aten.item %2879 : !torch.vtensor<[],si8> -> !torch.int
%2882 = torch.aten.quantize_per_tensor %2877, %2880, %2881, %int12_930 : !torch.vtensor<[1,128,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,40,40],!torch.qint8>
%2883 = torch.aten.int_repr %2882 : !torch.vtensor<[1,128,40,40],!torch.qint8> -> !torch.vtensor<[1,128,40,40],si8>
%2884 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2885 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2886 = torch.aten.item %2884 : !torch.vtensor<[],f32> -> !torch.float
%2887 = torch.aten.item %2885 : !torch.vtensor<[],si8> -> !torch.int
%2888 = torch.aten._make_per_tensor_quantized_tensor %2883, %2886, %2887 : !torch.vtensor<[1,128,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,40,40],!torch.qint8>
%2889 = torch.aten.dequantize.self %2888 : !torch.vtensor<[1,128,40,40],!torch.qint8> -> !torch.vtensor<[1,128,40,40],f32>
%int2_931 = torch.constant.int 2
%int2_932 = torch.constant.int 2
%2890 = torch.prim.ListConstruct %int2_931, %int2_932 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_933 = torch.constant.int 0
%int0_934 = torch.constant.int 0
%2891 = torch.prim.ListConstruct %int0_933, %int0_934 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_935 = torch.constant.int 2
%int2_936 = torch.constant.int 2
%2892 = torch.prim.ListConstruct %int2_935, %int2_936 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_937 = torch.constant.int 1
%int1_938 = torch.constant.int 1
%2893 = torch.prim.ListConstruct %int1_937, %int1_938 : (!torch.int, !torch.int) -> !torch.list<int>
%true_939 = torch.constant.bool true
%2894 = torch.aten.max_pool2d %2889, %2890, %2892, %2891, %2893, %true_939 : !torch.vtensor<[1,128,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,128,20,20],f32>
%2895 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2896 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_940 = torch.constant.int 12
%2897 = torch.aten.item %2895 : !torch.vtensor<[],f32> -> !torch.float
%2898 = torch.aten.item %2896 : !torch.vtensor<[],si8> -> !torch.int
%2899 = torch.aten.quantize_per_tensor %2894, %2897, %2898, %int12_940 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%2900 = torch.aten.int_repr %2899 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%2901 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2902 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2903 = torch.aten.item %2901 : !torch.vtensor<[],f32> -> !torch.float
%2904 = torch.aten.item %2902 : !torch.vtensor<[],si8> -> !torch.int
%2905 = torch.aten._make_per_tensor_quantized_tensor %2900, %2903, %2904 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%2906 = torch.aten.dequantize.self %2905 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%2907 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2908 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_941 = torch.constant.int 12
%2909 = torch.aten.item %2907 : !torch.vtensor<[],f32> -> !torch.float
%2910 = torch.aten.item %2908 : !torch.vtensor<[],si8> -> !torch.int
%2911 = torch.aten.quantize_per_tensor %76, %2909, %2910, %int12_941 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%2912 = torch.aten.int_repr %2911 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%2913 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2914 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2915 = torch.aten.item %2913 : !torch.vtensor<[],f32> -> !torch.float
%2916 = torch.aten.item %2914 : !torch.vtensor<[],si8> -> !torch.int
%2917 = torch.aten._make_per_tensor_quantized_tensor %2912, %2915, %2916 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%2918 = torch.aten.dequantize.self %2917 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%2919 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2920 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_942 = torch.constant.int 12
%2921 = torch.aten.item %2919 : !torch.vtensor<[],f32> -> !torch.float
%2922 = torch.aten.item %2920 : !torch.vtensor<[],si8> -> !torch.int
%2923 = torch.aten.quantize_per_tensor %77, %2921, %2922, %int12_942 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2924 = torch.aten.int_repr %2923 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%2925 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2926 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2927 = torch.aten.item %2925 : !torch.vtensor<[],f32> -> !torch.float
%2928 = torch.aten.item %2926 : !torch.vtensor<[],si8> -> !torch.int
%2929 = torch.aten._make_per_tensor_quantized_tensor %2924, %2927, %2928 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2930 = torch.aten.dequantize.self %2929 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_943 = torch.constant.int 1
%int1_944 = torch.constant.int 1
%int1_945 = torch.constant.int 1
%int1_946 = torch.constant.int 1
%int1_947 = torch.constant.int 1
%int1_948 = torch.constant.int 1
%int0_949 = torch.constant.int 0
%2931 = torch.prim.ListConstruct %int1_943, %int1_944 : (!torch.int, !torch.int) -> !torch.list<int>
%2932 = torch.prim.ListConstruct %int1_945, %int1_946 : (!torch.int, !torch.int) -> !torch.list<int>
%2933 = torch.prim.ListConstruct %int1_947, %int1_948 : (!torch.int, !torch.int) -> !torch.list<int>
%2934 = torch.prim.ListConstruct %int0_949, %int0_949 : (!torch.int, !torch.int) -> !torch.list<int>
%false_950 = torch.constant.bool false
%int1_951 = torch.constant.int 1
%2935 = torch.aten.convolution %2906, %2918, %2930, %2933, %2931, %2932, %false_950, %2934, %int1_951 : !torch.vtensor<[1,128,20,20],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,20,20],f32>
%2936 = torch.aten.relu %2935 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[1,128,20,20],f32>
%2937 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2938 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_952 = torch.constant.int 12
%2939 = torch.aten.item %2937 : !torch.vtensor<[],f32> -> !torch.float
%2940 = torch.aten.item %2938 : !torch.vtensor<[],si8> -> !torch.int
%2941 = torch.aten.quantize_per_tensor %2936, %2939, %2940, %int12_952 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%2942 = torch.aten.int_repr %2941 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%2943 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2944 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2945 = torch.aten.item %2943 : !torch.vtensor<[],f32> -> !torch.float
%2946 = torch.aten.item %2944 : !torch.vtensor<[],si8> -> !torch.int
%2947 = torch.aten._make_per_tensor_quantized_tensor %2942, %2945, %2946 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%2948 = torch.aten.dequantize.self %2947 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%int2_953 = torch.constant.int 2
%int2_954 = torch.constant.int 2
%2949 = torch.prim.ListConstruct %int2_953, %int2_954 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_955 = torch.constant.int 0
%int0_956 = torch.constant.int 0
%2950 = torch.prim.ListConstruct %int0_955, %int0_956 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_957 = torch.constant.int 2
%int2_958 = torch.constant.int 2
%2951 = torch.prim.ListConstruct %int2_957, %int2_958 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_959 = torch.constant.int 1
%int1_960 = torch.constant.int 1
%2952 = torch.prim.ListConstruct %int1_959, %int1_960 : (!torch.int, !torch.int) -> !torch.list<int>
%true_961 = torch.constant.bool true
%2953 = torch.aten.max_pool2d %2948, %2949, %2951, %2950, %2952, %true_961 : !torch.vtensor<[1,128,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,128,10,10],f32>
%2954 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2955 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_962 = torch.constant.int 12
%2956 = torch.aten.item %2954 : !torch.vtensor<[],f32> -> !torch.float
%2957 = torch.aten.item %2955 : !torch.vtensor<[],si8> -> !torch.int
%2958 = torch.aten.quantize_per_tensor %2953, %2956, %2957, %int12_962 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%2959 = torch.aten.int_repr %2958 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%2960 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2961 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2962 = torch.aten.item %2960 : !torch.vtensor<[],f32> -> !torch.float
%2963 = torch.aten.item %2961 : !torch.vtensor<[],si8> -> !torch.int
%2964 = torch.aten._make_per_tensor_quantized_tensor %2959, %2962, %2963 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%2965 = torch.aten.dequantize.self %2964 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%2966 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2967 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_963 = torch.constant.int 12
%2968 = torch.aten.item %2966 : !torch.vtensor<[],f32> -> !torch.float
%2969 = torch.aten.item %2967 : !torch.vtensor<[],si8> -> !torch.int
%2970 = torch.aten.quantize_per_tensor %78, %2968, %2969, %int12_963 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%2971 = torch.aten.int_repr %2970 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%2972 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2973 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2974 = torch.aten.item %2972 : !torch.vtensor<[],f32> -> !torch.float
%2975 = torch.aten.item %2973 : !torch.vtensor<[],si8> -> !torch.int
%2976 = torch.aten._make_per_tensor_quantized_tensor %2971, %2974, %2975 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%2977 = torch.aten.dequantize.self %2976 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%2978 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2979 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_964 = torch.constant.int 12
%2980 = torch.aten.item %2978 : !torch.vtensor<[],f32> -> !torch.float
%2981 = torch.aten.item %2979 : !torch.vtensor<[],si8> -> !torch.int
%2982 = torch.aten.quantize_per_tensor %79, %2980, %2981, %int12_964 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2983 = torch.aten.int_repr %2982 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%2984 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2985 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2986 = torch.aten.item %2984 : !torch.vtensor<[],f32> -> !torch.float
%2987 = torch.aten.item %2985 : !torch.vtensor<[],si8> -> !torch.int
%2988 = torch.aten._make_per_tensor_quantized_tensor %2983, %2986, %2987 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%2989 = torch.aten.dequantize.self %2988 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_965 = torch.constant.int 1
%int1_966 = torch.constant.int 1
%int1_967 = torch.constant.int 1
%int1_968 = torch.constant.int 1
%int1_969 = torch.constant.int 1
%int1_970 = torch.constant.int 1
%int0_971 = torch.constant.int 0
%2990 = torch.prim.ListConstruct %int1_965, %int1_966 : (!torch.int, !torch.int) -> !torch.list<int>
%2991 = torch.prim.ListConstruct %int1_967, %int1_968 : (!torch.int, !torch.int) -> !torch.list<int>
%2992 = torch.prim.ListConstruct %int1_969, %int1_970 : (!torch.int, !torch.int) -> !torch.list<int>
%2993 = torch.prim.ListConstruct %int0_971, %int0_971 : (!torch.int, !torch.int) -> !torch.list<int>
%false_972 = torch.constant.bool false
%int1_973 = torch.constant.int 1
%2994 = torch.aten.convolution %2965, %2977, %2989, %2992, %2990, %2991, %false_972, %2993, %int1_973 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%2995 = torch.aten.relu %2994 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%2996 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2997 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_974 = torch.constant.int 12
%2998 = torch.aten.item %2996 : !torch.vtensor<[],f32> -> !torch.float
%2999 = torch.aten.item %2997 : !torch.vtensor<[],si8> -> !torch.int
%3000 = torch.aten.quantize_per_tensor %2995, %2998, %2999, %int12_974 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%3001 = torch.aten.int_repr %3000 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%3002 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3003 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3004 = torch.aten.item %3002 : !torch.vtensor<[],f32> -> !torch.float
%3005 = torch.aten.item %3003 : !torch.vtensor<[],si8> -> !torch.int
%3006 = torch.aten._make_per_tensor_quantized_tensor %3001, %3004, %3005 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%3007 = torch.aten.dequantize.self %3006 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%3008 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3009 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_975 = torch.constant.int 12
%3010 = torch.aten.item %3008 : !torch.vtensor<[],f32> -> !torch.float
%3011 = torch.aten.item %3009 : !torch.vtensor<[],si8> -> !torch.int
%3012 = torch.aten.quantize_per_tensor %80, %3010, %3011, %int12_975 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%3013 = torch.aten.int_repr %3012 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%3014 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3015 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3016 = torch.aten.item %3014 : !torch.vtensor<[],f32> -> !torch.float
%3017 = torch.aten.item %3015 : !torch.vtensor<[],si8> -> !torch.int
%3018 = torch.aten._make_per_tensor_quantized_tensor %3013, %3016, %3017 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%3019 = torch.aten.dequantize.self %3018 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%3020 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3021 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_976 = torch.constant.int 12
%3022 = torch.aten.item %3020 : !torch.vtensor<[],f32> -> !torch.float
%3023 = torch.aten.item %3021 : !torch.vtensor<[],si8> -> !torch.int
%3024 = torch.aten.quantize_per_tensor %81, %3022, %3023, %int12_976 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3025 = torch.aten.int_repr %3024 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%3026 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3027 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3028 = torch.aten.item %3026 : !torch.vtensor<[],f32> -> !torch.float
%3029 = torch.aten.item %3027 : !torch.vtensor<[],si8> -> !torch.int
%3030 = torch.aten._make_per_tensor_quantized_tensor %3025, %3028, %3029 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3031 = torch.aten.dequantize.self %3030 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int2_977 = torch.constant.int 2
%int2_978 = torch.constant.int 2
%int2_979 = torch.constant.int 2
%int2_980 = torch.constant.int 2
%int1_981 = torch.constant.int 1
%int1_982 = torch.constant.int 1
%int0_983 = torch.constant.int 0
%3032 = torch.prim.ListConstruct %int2_977, %int2_978 : (!torch.int, !torch.int) -> !torch.list<int>
%3033 = torch.prim.ListConstruct %int2_979, %int2_980 : (!torch.int, !torch.int) -> !torch.list<int>
%3034 = torch.prim.ListConstruct %int1_981, %int1_982 : (!torch.int, !torch.int) -> !torch.list<int>
%3035 = torch.prim.ListConstruct %int0_983, %int0_983 : (!torch.int, !torch.int) -> !torch.list<int>
%false_984 = torch.constant.bool false
%int1_985 = torch.constant.int 1
%3036 = torch.aten.convolution %3007, %3019, %3031, %3034, %3032, %3033, %false_984, %3035, %int1_985 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%3037 = torch.aten.relu %3036 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%3038 = torch.prim.ListConstruct %3037, %3007 : (!torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[1,128,10,10],f32>) -> !torch.list<vtensor>
%int1_986 = torch.constant.int 1
%3039 = torch.aten.cat %3038, %int1_986 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3040 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3041 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_987 = torch.constant.int 12
%3042 = torch.aten.item %3040 : !torch.vtensor<[],f32> -> !torch.float
%3043 = torch.aten.item %3041 : !torch.vtensor<[],si8> -> !torch.int
%3044 = torch.aten.quantize_per_tensor %3039, %3042, %3043, %int12_987 : !torch.vtensor<[1,256,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3045 = torch.aten.int_repr %3044 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],si8>
%3046 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3047 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3048 = torch.aten.item %3046 : !torch.vtensor<[],f32> -> !torch.float
%3049 = torch.aten.item %3047 : !torch.vtensor<[],si8> -> !torch.int
%3050 = torch.aten._make_per_tensor_quantized_tensor %3045, %3048, %3049 : !torch.vtensor<[1,256,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3051 = torch.aten.dequantize.self %3050 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],f32>
%3052 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3053 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_988 = torch.constant.int 12
%3054 = torch.aten.item %3052 : !torch.vtensor<[],f32> -> !torch.float
%3055 = torch.aten.item %3053 : !torch.vtensor<[],si8> -> !torch.int
%3056 = torch.aten.quantize_per_tensor %82, %3054, %3055, %int12_988 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%3057 = torch.aten.int_repr %3056 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%3058 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3059 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3060 = torch.aten.item %3058 : !torch.vtensor<[],f32> -> !torch.float
%3061 = torch.aten.item %3059 : !torch.vtensor<[],si8> -> !torch.int
%3062 = torch.aten._make_per_tensor_quantized_tensor %3057, %3060, %3061 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%3063 = torch.aten.dequantize.self %3062 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%3064 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3065 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_989 = torch.constant.int 12
%3066 = torch.aten.item %3064 : !torch.vtensor<[],f32> -> !torch.float
%3067 = torch.aten.item %3065 : !torch.vtensor<[],si8> -> !torch.int
%3068 = torch.aten.quantize_per_tensor %83, %3066, %3067, %int12_989 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3069 = torch.aten.int_repr %3068 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%3070 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3071 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3072 = torch.aten.item %3070 : !torch.vtensor<[],f32> -> !torch.float
%3073 = torch.aten.item %3071 : !torch.vtensor<[],si8> -> !torch.int
%3074 = torch.aten._make_per_tensor_quantized_tensor %3069, %3072, %3073 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3075 = torch.aten.dequantize.self %3074 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_990 = torch.constant.int 1
%int1_991 = torch.constant.int 1
%int1_992 = torch.constant.int 1
%int1_993 = torch.constant.int 1
%int1_994 = torch.constant.int 1
%int1_995 = torch.constant.int 1
%int0_996 = torch.constant.int 0
%3076 = torch.prim.ListConstruct %int1_990, %int1_991 : (!torch.int, !torch.int) -> !torch.list<int>
%3077 = torch.prim.ListConstruct %int1_992, %int1_993 : (!torch.int, !torch.int) -> !torch.list<int>
%3078 = torch.prim.ListConstruct %int1_994, %int1_995 : (!torch.int, !torch.int) -> !torch.list<int>
%3079 = torch.prim.ListConstruct %int0_996, %int0_996 : (!torch.int, !torch.int) -> !torch.list<int>
%false_997 = torch.constant.bool false
%int1_998 = torch.constant.int 1
%3080 = torch.aten.convolution %3051, %3063, %3075, %3078, %3076, %3077, %false_997, %3079, %int1_998 : !torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%3081 = torch.aten.relu %3080 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%3082 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3083 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_999 = torch.constant.int 12
%3084 = torch.aten.item %3082 : !torch.vtensor<[],f32> -> !torch.float
%3085 = torch.aten.item %3083 : !torch.vtensor<[],si8> -> !torch.int
%3086 = torch.aten.quantize_per_tensor %3081, %3084, %3085, %int12_999 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%3087 = torch.aten.int_repr %3086 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%3088 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3089 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3090 = torch.aten.item %3088 : !torch.vtensor<[],f32> -> !torch.float
%3091 = torch.aten.item %3089 : !torch.vtensor<[],si8> -> !torch.int
%3092 = torch.aten._make_per_tensor_quantized_tensor %3087, %3090, %3091 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%3093 = torch.aten.dequantize.self %3092 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%3094 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%3095 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%3096 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1000 = torch.constant.int 0
%int0_1001 = torch.constant.int 0
%int0_1002 = torch.constant.int 0
%3097 = torch.aten.select.int %3096, %int0_1000, %int0_1002 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3098 = torch.aten.item %3097 : !torch.vtensor<[1],si64> -> !torch.int
%3099 = torch.aten.lt.int %3098, %int0_1000 : !torch.int, !torch.int -> !torch.bool
%3100 = torch.aten.Int.bool %3099 : !torch.bool -> !torch.int
%3101 = torch.aten.mul.int %3100, %int0_1001 : !torch.int, !torch.int -> !torch.int
%3102 = torch.aten.add.int %3098, %3101 : !torch.int, !torch.int -> !torch.int
%3103 = torch.prim.ListConstruct %3102 : (!torch.int) -> !torch.list<int>
%false_1003 = torch.constant.bool false
%none_1004 = torch.constant.none
%3104 = torch.aten.tensor %3103, %none_1004, %none_1004, %false_1003 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1005, %indices_1006 = torch.aten.sort %3104, %int0_1000, %false_1003 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1007 = torch.constant.int 0
%3105 = torch.aten.select.int %values_1005, %int0_1000, %int0_1007 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3106 = torch.aten.item %3105 : !torch.vtensor<[1],si64> -> !torch.int
%3107 = torch.aten.unsqueeze %3094, %3106 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%3108 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1008 = torch.constant.int 0
%int0_1009 = torch.constant.int 0
%int0_1010 = torch.constant.int 0
%3109 = torch.aten.select.int %3108, %int0_1008, %int0_1010 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3110 = torch.aten.item %3109 : !torch.vtensor<[1],si64> -> !torch.int
%3111 = torch.aten.lt.int %3110, %int0_1008 : !torch.int, !torch.int -> !torch.bool
%3112 = torch.aten.Int.bool %3111 : !torch.bool -> !torch.int
%3113 = torch.aten.mul.int %3112, %int0_1009 : !torch.int, !torch.int -> !torch.int
%3114 = torch.aten.add.int %3110, %3113 : !torch.int, !torch.int -> !torch.int
%3115 = torch.prim.ListConstruct %3114 : (!torch.int) -> !torch.list<int>
%false_1011 = torch.constant.bool false
%none_1012 = torch.constant.none
%3116 = torch.aten.tensor %3115, %none_1012, %none_1012, %false_1011 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1013, %indices_1014 = torch.aten.sort %3116, %int0_1008, %false_1011 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1015 = torch.constant.int 0
%3117 = torch.aten.select.int %values_1013, %int0_1008, %int0_1015 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3118 = torch.aten.item %3117 : !torch.vtensor<[1],si64> -> !torch.int
%3119 = torch.aten.unsqueeze %3095, %3118 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%3120 = torch.prim.ListConstruct %3107, %3119 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1016 = torch.constant.int 0
%3121 = torch.aten.cat %3120, %int0_1016 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%3122 = torch.aten._shape_as_tensor %3093 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[4],si64>
%3123 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3124 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3125 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1017 = torch.constant.none
%int1_1018 = torch.constant.int 1
%3126 = torch.prim.ListConstruct %int1_1018 : (!torch.int) -> !torch.list<int>
%3127 = torch.aten.ones %3126, %none_1017, %none_1017, %none_1017, %none_1017 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1019 = torch.constant.int 0
%int0_1020 = torch.constant.int 0
%3128 = torch.prim.NumToTensor.Scalar %int0_1020 : !torch.int -> !torch.vtensor<[1],si64>
%3129 = torch.aten.index_select %3124, %int0_1019, %3128 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3130 = torch.aten.item %3129 : !torch.vtensor<[1],si64> -> !torch.int
%3131 = torch.aten.index_select %3125, %int0_1019, %3128 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3132 = torch.aten.item %3131 : !torch.vtensor<[1],si64> -> !torch.int
%3133 = torch.aten.index_select %3123, %int0_1019, %3128 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3134 = torch.aten.item %3133 : !torch.vtensor<[1],si64> -> !torch.int
%3135 = torch.aten.index_select %3127, %int0_1019, %3128 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3136 = torch.aten.item %3135 : !torch.vtensor<[1],si64> -> !torch.int
%3137 = torch.aten.slice.Tensor %3122, %3134, %3130, %3132, %3136 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1021 = torch.constant.int 4
%none_1022 = torch.constant.none
%false_1023 = torch.constant.bool false
%3138 = torch.aten.to.dtype %3121, %int4_1021, %false_1023, %false_1023, %none_1022 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%3139 = torch.prim.ListConstruct %3137, %3138 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1024 = torch.constant.int 0
%3140 = torch.aten.cat %3139, %int0_1024 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%3141 = torch.operator "onnx.Resize"(%3093, %none, %none, %3140) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%3142 = torch.prim.ListConstruct %3141, %2948 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,128,20,20],f32>) -> !torch.list<vtensor>
%int1_1025 = torch.constant.int 1
%3143 = torch.aten.cat %3142, %int1_1025 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%3144 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3145 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1026 = torch.constant.int 12
%3146 = torch.aten.item %3144 : !torch.vtensor<[],f32> -> !torch.float
%3147 = torch.aten.item %3145 : !torch.vtensor<[],si8> -> !torch.int
%3148 = torch.aten.quantize_per_tensor %3143, %3146, %3147, %int12_1026 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%3149 = torch.aten.int_repr %3148 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%3150 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3151 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3152 = torch.aten.item %3150 : !torch.vtensor<[],f32> -> !torch.float
%3153 = torch.aten.item %3151 : !torch.vtensor<[],si8> -> !torch.int
%3154 = torch.aten._make_per_tensor_quantized_tensor %3149, %3152, %3153 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%3155 = torch.aten.dequantize.self %3154 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%3156 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3157 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1027 = torch.constant.int 12
%3158 = torch.aten.item %3156 : !torch.vtensor<[],f32> -> !torch.float
%3159 = torch.aten.item %3157 : !torch.vtensor<[],si8> -> !torch.int
%3160 = torch.aten.quantize_per_tensor %84, %3158, %3159, %int12_1027 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%3161 = torch.aten.int_repr %3160 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%3162 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3163 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3164 = torch.aten.item %3162 : !torch.vtensor<[],f32> -> !torch.float
%3165 = torch.aten.item %3163 : !torch.vtensor<[],si8> -> !torch.int
%3166 = torch.aten._make_per_tensor_quantized_tensor %3161, %3164, %3165 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%3167 = torch.aten.dequantize.self %3166 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%3168 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3169 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1028 = torch.constant.int 12
%3170 = torch.aten.item %3168 : !torch.vtensor<[],f32> -> !torch.float
%3171 = torch.aten.item %3169 : !torch.vtensor<[],si8> -> !torch.int
%3172 = torch.aten.quantize_per_tensor %85, %3170, %3171, %int12_1028 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3173 = torch.aten.int_repr %3172 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%3174 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3175 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3176 = torch.aten.item %3174 : !torch.vtensor<[],f32> -> !torch.float
%3177 = torch.aten.item %3175 : !torch.vtensor<[],si8> -> !torch.int
%3178 = torch.aten._make_per_tensor_quantized_tensor %3173, %3176, %3177 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3179 = torch.aten.dequantize.self %3178 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1029 = torch.constant.int 1
%int1_1030 = torch.constant.int 1
%int1_1031 = torch.constant.int 1
%int1_1032 = torch.constant.int 1
%int1_1033 = torch.constant.int 1
%int1_1034 = torch.constant.int 1
%int0_1035 = torch.constant.int 0
%3180 = torch.prim.ListConstruct %int1_1029, %int1_1030 : (!torch.int, !torch.int) -> !torch.list<int>
%3181 = torch.prim.ListConstruct %int1_1031, %int1_1032 : (!torch.int, !torch.int) -> !torch.list<int>
%3182 = torch.prim.ListConstruct %int1_1033, %int1_1034 : (!torch.int, !torch.int) -> !torch.list<int>
%3183 = torch.prim.ListConstruct %int0_1035, %int0_1035 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1036 = torch.constant.bool false
%int1_1037 = torch.constant.int 1
%3184 = torch.aten.convolution %3155, %3167, %3179, %3182, %3180, %3181, %false_1036, %3183, %int1_1037 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,20,20],f32>
%3185 = torch.aten.relu %3184 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[1,128,20,20],f32>
%3186 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3187 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1038 = torch.constant.int 12
%3188 = torch.aten.item %3186 : !torch.vtensor<[],f32> -> !torch.float
%3189 = torch.aten.item %3187 : !torch.vtensor<[],si8> -> !torch.int
%3190 = torch.aten.quantize_per_tensor %3185, %3188, %3189, %int12_1038 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%3191 = torch.aten.int_repr %3190 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%3192 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3193 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3194 = torch.aten.item %3192 : !torch.vtensor<[],f32> -> !torch.float
%3195 = torch.aten.item %3193 : !torch.vtensor<[],si8> -> !torch.int
%3196 = torch.aten._make_per_tensor_quantized_tensor %3191, %3194, %3195 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%3197 = torch.aten.dequantize.self %3196 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%3198 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%3199 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%3200 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1039 = torch.constant.int 0
%int0_1040 = torch.constant.int 0
%int0_1041 = torch.constant.int 0
%3201 = torch.aten.select.int %3200, %int0_1039, %int0_1041 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3202 = torch.aten.item %3201 : !torch.vtensor<[1],si64> -> !torch.int
%3203 = torch.aten.lt.int %3202, %int0_1039 : !torch.int, !torch.int -> !torch.bool
%3204 = torch.aten.Int.bool %3203 : !torch.bool -> !torch.int
%3205 = torch.aten.mul.int %3204, %int0_1040 : !torch.int, !torch.int -> !torch.int
%3206 = torch.aten.add.int %3202, %3205 : !torch.int, !torch.int -> !torch.int
%3207 = torch.prim.ListConstruct %3206 : (!torch.int) -> !torch.list<int>
%false_1042 = torch.constant.bool false
%none_1043 = torch.constant.none
%3208 = torch.aten.tensor %3207, %none_1043, %none_1043, %false_1042 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1044, %indices_1045 = torch.aten.sort %3208, %int0_1039, %false_1042 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1046 = torch.constant.int 0
%3209 = torch.aten.select.int %values_1044, %int0_1039, %int0_1046 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3210 = torch.aten.item %3209 : !torch.vtensor<[1],si64> -> !torch.int
%3211 = torch.aten.unsqueeze %3198, %3210 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%3212 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1047 = torch.constant.int 0
%int0_1048 = torch.constant.int 0
%int0_1049 = torch.constant.int 0
%3213 = torch.aten.select.int %3212, %int0_1047, %int0_1049 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3214 = torch.aten.item %3213 : !torch.vtensor<[1],si64> -> !torch.int
%3215 = torch.aten.lt.int %3214, %int0_1047 : !torch.int, !torch.int -> !torch.bool
%3216 = torch.aten.Int.bool %3215 : !torch.bool -> !torch.int
%3217 = torch.aten.mul.int %3216, %int0_1048 : !torch.int, !torch.int -> !torch.int
%3218 = torch.aten.add.int %3214, %3217 : !torch.int, !torch.int -> !torch.int
%3219 = torch.prim.ListConstruct %3218 : (!torch.int) -> !torch.list<int>
%false_1050 = torch.constant.bool false
%none_1051 = torch.constant.none
%3220 = torch.aten.tensor %3219, %none_1051, %none_1051, %false_1050 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1052, %indices_1053 = torch.aten.sort %3220, %int0_1047, %false_1050 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1054 = torch.constant.int 0
%3221 = torch.aten.select.int %values_1052, %int0_1047, %int0_1054 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%3222 = torch.aten.item %3221 : !torch.vtensor<[1],si64> -> !torch.int
%3223 = torch.aten.unsqueeze %3199, %3222 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%3224 = torch.prim.ListConstruct %3211, %3223 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1055 = torch.constant.int 0
%3225 = torch.aten.cat %3224, %int0_1055 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%3226 = torch.aten._shape_as_tensor %3197 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[4],si64>
%3227 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3228 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%3229 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1056 = torch.constant.none
%int1_1057 = torch.constant.int 1
%3230 = torch.prim.ListConstruct %int1_1057 : (!torch.int) -> !torch.list<int>
%3231 = torch.aten.ones %3230, %none_1056, %none_1056, %none_1056, %none_1056 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1058 = torch.constant.int 0
%int0_1059 = torch.constant.int 0
%3232 = torch.prim.NumToTensor.Scalar %int0_1059 : !torch.int -> !torch.vtensor<[1],si64>
%3233 = torch.aten.index_select %3228, %int0_1058, %3232 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3234 = torch.aten.item %3233 : !torch.vtensor<[1],si64> -> !torch.int
%3235 = torch.aten.index_select %3229, %int0_1058, %3232 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3236 = torch.aten.item %3235 : !torch.vtensor<[1],si64> -> !torch.int
%3237 = torch.aten.index_select %3227, %int0_1058, %3232 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3238 = torch.aten.item %3237 : !torch.vtensor<[1],si64> -> !torch.int
%3239 = torch.aten.index_select %3231, %int0_1058, %3232 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%3240 = torch.aten.item %3239 : !torch.vtensor<[1],si64> -> !torch.int
%3241 = torch.aten.slice.Tensor %3226, %3238, %3234, %3236, %3240 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1060 = torch.constant.int 4
%none_1061 = torch.constant.none
%false_1062 = torch.constant.bool false
%3242 = torch.aten.to.dtype %3225, %int4_1060, %false_1062, %false_1062, %none_1061 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%3243 = torch.prim.ListConstruct %3241, %3242 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1063 = torch.constant.int 0
%3244 = torch.aten.cat %3243, %int0_1063 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%3245 = torch.operator "onnx.Resize"(%3197, %none, %none, %3244) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%3246 = torch.prim.ListConstruct %3245, %2889 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,128,40,40],f32>) -> !torch.list<vtensor>
%int1_1064 = torch.constant.int 1
%3247 = torch.aten.cat %3246, %int1_1064 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%3248 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3249 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1065 = torch.constant.int 12
%3250 = torch.aten.item %3248 : !torch.vtensor<[],f32> -> !torch.float
%3251 = torch.aten.item %3249 : !torch.vtensor<[],si8> -> !torch.int
%3252 = torch.aten.quantize_per_tensor %3247, %3250, %3251, %int12_1065 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%3253 = torch.aten.int_repr %3252 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%3254 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3256 = torch.aten.item %3254 : !torch.vtensor<[],f32> -> !torch.float
%3257 = torch.aten.item %3255 : !torch.vtensor<[],si8> -> !torch.int
%3258 = torch.aten._make_per_tensor_quantized_tensor %3253, %3256, %3257 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%3259 = torch.aten.dequantize.self %3258 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%3260 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1066 = torch.constant.int 12
%3262 = torch.aten.item %3260 : !torch.vtensor<[],f32> -> !torch.float
%3263 = torch.aten.item %3261 : !torch.vtensor<[],si8> -> !torch.int
%3264 = torch.aten.quantize_per_tensor %86, %3262, %3263, %int12_1066 : !torch.vtensor<[512,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%3265 = torch.aten.int_repr %3264 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],si8>
%3266 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3268 = torch.aten.item %3266 : !torch.vtensor<[],f32> -> !torch.float
%3269 = torch.aten.item %3267 : !torch.vtensor<[],si8> -> !torch.int
%3270 = torch.aten._make_per_tensor_quantized_tensor %3265, %3268, %3269 : !torch.vtensor<[512,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%3271 = torch.aten.dequantize.self %3270 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],f32>
%3272 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3273 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1067 = torch.constant.int 12
%3274 = torch.aten.item %3272 : !torch.vtensor<[],f32> -> !torch.float
%3275 = torch.aten.item %3273 : !torch.vtensor<[],si8> -> !torch.int
%3276 = torch.aten.quantize_per_tensor %87, %3274, %3275, %int12_1067 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3277 = torch.aten.int_repr %3276 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%3278 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3279 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3280 = torch.aten.item %3278 : !torch.vtensor<[],f32> -> !torch.float
%3281 = torch.aten.item %3279 : !torch.vtensor<[],si8> -> !torch.int
%3282 = torch.aten._make_per_tensor_quantized_tensor %3277, %3280, %3281 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3283 = torch.aten.dequantize.self %3282 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1068 = torch.constant.int 1
%int1_1069 = torch.constant.int 1
%int1_1070 = torch.constant.int 1
%int1_1071 = torch.constant.int 1
%int1_1072 = torch.constant.int 1
%int1_1073 = torch.constant.int 1
%int0_1074 = torch.constant.int 0
%3284 = torch.prim.ListConstruct %int1_1068, %int1_1069 : (!torch.int, !torch.int) -> !torch.list<int>
%3285 = torch.prim.ListConstruct %int1_1070, %int1_1071 : (!torch.int, !torch.int) -> !torch.list<int>
%3286 = torch.prim.ListConstruct %int1_1072, %int1_1073 : (!torch.int, !torch.int) -> !torch.list<int>
%3287 = torch.prim.ListConstruct %int0_1074, %int0_1074 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1075 = torch.constant.bool false
%int1_1076 = torch.constant.int 1
%3288 = torch.aten.convolution %3259, %3271, %3283, %3286, %3284, %3285, %false_1075, %3287, %int1_1076 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[512,256,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,40,40],f32>
%3289 = torch.aten.relu %3288 : !torch.vtensor<[1,512,40,40],f32> -> !torch.vtensor<[1,512,40,40],f32>
%3290 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3291 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1077 = torch.constant.int 12
%3292 = torch.aten.item %3290 : !torch.vtensor<[],f32> -> !torch.float
%3293 = torch.aten.item %3291 : !torch.vtensor<[],si8> -> !torch.int
%3294 = torch.aten.quantize_per_tensor %3289, %3292, %3293, %int12_1077 : !torch.vtensor<[1,512,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%3295 = torch.aten.int_repr %3294 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],si8>
%3296 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3297 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3298 = torch.aten.item %3296 : !torch.vtensor<[],f32> -> !torch.float
%3299 = torch.aten.item %3297 : !torch.vtensor<[],si8> -> !torch.int
%3300 = torch.aten._make_per_tensor_quantized_tensor %3295, %3298, %3299 : !torch.vtensor<[1,512,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%3301 = torch.aten.dequantize.self %3300 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],f32>
%int1_1078 = torch.constant.int 1
%3302 = torch.aten.add.Tensor %3301, %2847, %int1_1078 : !torch.vtensor<[1,512,40,40],f32>, !torch.vtensor<[1,512,40,40],f32>, !torch.int -> !torch.vtensor<[1,512,40,40],f32>
%3303 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1079 = torch.constant.int 12
%3305 = torch.aten.item %3303 : !torch.vtensor<[],f32> -> !torch.float
%3306 = torch.aten.item %3304 : !torch.vtensor<[],si8> -> !torch.int
%3307 = torch.aten.quantize_per_tensor %3302, %3305, %3306, %int12_1079 : !torch.vtensor<[1,512,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%3308 = torch.aten.int_repr %3307 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],si8>
%3309 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3311 = torch.aten.item %3309 : !torch.vtensor<[],f32> -> !torch.float
%3312 = torch.aten.item %3310 : !torch.vtensor<[],si8> -> !torch.int
%3313 = torch.aten._make_per_tensor_quantized_tensor %3308, %3311, %3312 : !torch.vtensor<[1,512,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,40,40],!torch.qint8>
%3314 = torch.aten.dequantize.self %3313 : !torch.vtensor<[1,512,40,40],!torch.qint8> -> !torch.vtensor<[1,512,40,40],f32>
%int2_1080 = torch.constant.int 2
%int2_1081 = torch.constant.int 2
%3315 = torch.prim.ListConstruct %int2_1080, %int2_1081 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1082 = torch.constant.int 0
%int0_1083 = torch.constant.int 0
%3316 = torch.prim.ListConstruct %int0_1082, %int0_1083 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1084 = torch.constant.int 2
%int2_1085 = torch.constant.int 2
%3317 = torch.prim.ListConstruct %int2_1084, %int2_1085 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1086 = torch.constant.int 1
%int1_1087 = torch.constant.int 1
%3318 = torch.prim.ListConstruct %int1_1086, %int1_1087 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1088 = torch.constant.bool true
%3319 = torch.aten.max_pool2d %3314, %3315, %3317, %3316, %3318, %true_1088 : !torch.vtensor<[1,512,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,512,20,20],f32>
%3320 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3321 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1089 = torch.constant.int 12
%3322 = torch.aten.item %3320 : !torch.vtensor<[],f32> -> !torch.float
%3323 = torch.aten.item %3321 : !torch.vtensor<[],si8> -> !torch.int
%3324 = torch.aten.quantize_per_tensor %3319, %3322, %3323, %int12_1089 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3325 = torch.aten.int_repr %3324 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3326 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3327 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3328 = torch.aten.item %3326 : !torch.vtensor<[],f32> -> !torch.float
%3329 = torch.aten.item %3327 : !torch.vtensor<[],si8> -> !torch.int
%3330 = torch.aten._make_per_tensor_quantized_tensor %3325, %3328, %3329 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3331 = torch.aten.dequantize.self %3330 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%3332 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3333 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1090 = torch.constant.int 12
%3334 = torch.aten.item %3332 : !torch.vtensor<[],f32> -> !torch.float
%3335 = torch.aten.item %3333 : !torch.vtensor<[],si8> -> !torch.int
%3336 = torch.aten.quantize_per_tensor %88, %3334, %3335, %int12_1090 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3337 = torch.aten.int_repr %3336 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%3338 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3339 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3340 = torch.aten.item %3338 : !torch.vtensor<[],f32> -> !torch.float
%3341 = torch.aten.item %3339 : !torch.vtensor<[],si8> -> !torch.int
%3342 = torch.aten._make_per_tensor_quantized_tensor %3337, %3340, %3341 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3343 = torch.aten.dequantize.self %3342 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%3344 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3345 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1091 = torch.constant.int 12
%3346 = torch.aten.item %3344 : !torch.vtensor<[],f32> -> !torch.float
%3347 = torch.aten.item %3345 : !torch.vtensor<[],si8> -> !torch.int
%3348 = torch.aten.quantize_per_tensor %89, %3346, %3347, %int12_1091 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3349 = torch.aten.int_repr %3348 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%3350 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3351 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3352 = torch.aten.item %3350 : !torch.vtensor<[],f32> -> !torch.float
%3353 = torch.aten.item %3351 : !torch.vtensor<[],si8> -> !torch.int
%3354 = torch.aten._make_per_tensor_quantized_tensor %3349, %3352, %3353 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3355 = torch.aten.dequantize.self %3354 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1092 = torch.constant.int 1
%int1_1093 = torch.constant.int 1
%int1_1094 = torch.constant.int 1
%int1_1095 = torch.constant.int 1
%int1_1096 = torch.constant.int 1
%int1_1097 = torch.constant.int 1
%int0_1098 = torch.constant.int 0
%3356 = torch.prim.ListConstruct %int1_1092, %int1_1093 : (!torch.int, !torch.int) -> !torch.list<int>
%3357 = torch.prim.ListConstruct %int1_1094, %int1_1095 : (!torch.int, !torch.int) -> !torch.list<int>
%3358 = torch.prim.ListConstruct %int1_1096, %int1_1097 : (!torch.int, !torch.int) -> !torch.list<int>
%3359 = torch.prim.ListConstruct %int0_1098, %int0_1098 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1099 = torch.constant.bool false
%int1_1100 = torch.constant.int 1
%3360 = torch.aten.convolution %3331, %3343, %3355, %3358, %3356, %3357, %false_1099, %3359, %int1_1100 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3361 = torch.aten.relu %3360 : !torch.vtensor<[1,512,20,20],f32> -> !torch.vtensor<[1,512,20,20],f32>
%3362 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3363 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1101 = torch.constant.int 12
%3364 = torch.aten.item %3362 : !torch.vtensor<[],f32> -> !torch.float
%3365 = torch.aten.item %3363 : !torch.vtensor<[],si8> -> !torch.int
%3366 = torch.aten.quantize_per_tensor %3361, %3364, %3365, %int12_1101 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3367 = torch.aten.int_repr %3366 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3368 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3369 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3370 = torch.aten.item %3368 : !torch.vtensor<[],f32> -> !torch.float
%3371 = torch.aten.item %3369 : !torch.vtensor<[],si8> -> !torch.int
%3372 = torch.aten._make_per_tensor_quantized_tensor %3367, %3370, %3371 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3373 = torch.aten.dequantize.self %3372 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%3374 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3375 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1102 = torch.constant.int 12
%3376 = torch.aten.item %3374 : !torch.vtensor<[],f32> -> !torch.float
%3377 = torch.aten.item %3375 : !torch.vtensor<[],si8> -> !torch.int
%3378 = torch.aten.quantize_per_tensor %90, %3376, %3377, %int12_1102 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3379 = torch.aten.int_repr %3378 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3380 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3381 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3382 = torch.aten.item %3380 : !torch.vtensor<[],f32> -> !torch.float
%3383 = torch.aten.item %3381 : !torch.vtensor<[],si8> -> !torch.int
%3384 = torch.aten._make_per_tensor_quantized_tensor %3379, %3382, %3383 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3385 = torch.aten.dequantize.self %3384 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3386 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3387 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1103 = torch.constant.int 12
%3388 = torch.aten.item %3386 : !torch.vtensor<[],f32> -> !torch.float
%3389 = torch.aten.item %3387 : !torch.vtensor<[],si8> -> !torch.int
%3390 = torch.aten.quantize_per_tensor %91, %3388, %3389, %int12_1103 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3391 = torch.aten.int_repr %3390 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3392 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3393 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3394 = torch.aten.item %3392 : !torch.vtensor<[],f32> -> !torch.float
%3395 = torch.aten.item %3393 : !torch.vtensor<[],si8> -> !torch.int
%3396 = torch.aten._make_per_tensor_quantized_tensor %3391, %3394, %3395 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3397 = torch.aten.dequantize.self %3396 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_1104 = torch.constant.int 1
%int1_1105 = torch.constant.int 1
%int1_1106 = torch.constant.int 1
%int1_1107 = torch.constant.int 1
%int1_1108 = torch.constant.int 1
%int1_1109 = torch.constant.int 1
%int0_1110 = torch.constant.int 0
%3398 = torch.prim.ListConstruct %int1_1104, %int1_1105 : (!torch.int, !torch.int) -> !torch.list<int>
%3399 = torch.prim.ListConstruct %int1_1106, %int1_1107 : (!torch.int, !torch.int) -> !torch.list<int>
%3400 = torch.prim.ListConstruct %int1_1108, %int1_1109 : (!torch.int, !torch.int) -> !torch.list<int>
%3401 = torch.prim.ListConstruct %int0_1110, %int0_1110 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1111 = torch.constant.bool false
%int1_1112 = torch.constant.int 1
%3402 = torch.aten.convolution %3373, %3385, %3397, %3400, %3398, %3399, %false_1111, %3401, %int1_1112 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3403 = torch.aten.relu %3402 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3404 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3405 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1113 = torch.constant.int 12
%3406 = torch.aten.item %3404 : !torch.vtensor<[],f32> -> !torch.float
%3407 = torch.aten.item %3405 : !torch.vtensor<[],si8> -> !torch.int
%3408 = torch.aten.quantize_per_tensor %3403, %3406, %3407, %int12_1113 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3409 = torch.aten.int_repr %3408 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%3410 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3412 = torch.aten.item %3410 : !torch.vtensor<[],f32> -> !torch.float
%3413 = torch.aten.item %3411 : !torch.vtensor<[],si8> -> !torch.int
%3414 = torch.aten._make_per_tensor_quantized_tensor %3409, %3412, %3413 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3415 = torch.aten.dequantize.self %3414 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%3416 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1114 = torch.constant.int 12
%3418 = torch.aten.item %3416 : !torch.vtensor<[],f32> -> !torch.float
%3419 = torch.aten.item %3417 : !torch.vtensor<[],si8> -> !torch.int
%3420 = torch.aten.quantize_per_tensor %92, %3418, %3419, %int12_1114 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3421 = torch.aten.int_repr %3420 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3422 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3424 = torch.aten.item %3422 : !torch.vtensor<[],f32> -> !torch.float
%3425 = torch.aten.item %3423 : !torch.vtensor<[],si8> -> !torch.int
%3426 = torch.aten._make_per_tensor_quantized_tensor %3421, %3424, %3425 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3427 = torch.aten.dequantize.self %3426 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3428 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1115 = torch.constant.int 12
%3430 = torch.aten.item %3428 : !torch.vtensor<[],f32> -> !torch.float
%3431 = torch.aten.item %3429 : !torch.vtensor<[],si8> -> !torch.int
%3432 = torch.aten.quantize_per_tensor %93, %3430, %3431, %int12_1115 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3433 = torch.aten.int_repr %3432 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3434 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3436 = torch.aten.item %3434 : !torch.vtensor<[],f32> -> !torch.float
%3437 = torch.aten.item %3435 : !torch.vtensor<[],si8> -> !torch.int
%3438 = torch.aten._make_per_tensor_quantized_tensor %3433, %3436, %3437 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3439 = torch.aten.dequantize.self %3438 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1116 = torch.constant.int 2
%int2_1117 = torch.constant.int 2
%int2_1118 = torch.constant.int 2
%int2_1119 = torch.constant.int 2
%int1_1120 = torch.constant.int 1
%int1_1121 = torch.constant.int 1
%int0_1122 = torch.constant.int 0
%3440 = torch.prim.ListConstruct %int2_1116, %int2_1117 : (!torch.int, !torch.int) -> !torch.list<int>
%3441 = torch.prim.ListConstruct %int2_1118, %int2_1119 : (!torch.int, !torch.int) -> !torch.list<int>
%3442 = torch.prim.ListConstruct %int1_1120, %int1_1121 : (!torch.int, !torch.int) -> !torch.list<int>
%3443 = torch.prim.ListConstruct %int0_1122, %int0_1122 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1123 = torch.constant.bool false
%int1_1124 = torch.constant.int 1
%3444 = torch.aten.convolution %3415, %3427, %3439, %3442, %3440, %3441, %false_1123, %3443, %int1_1124 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3445 = torch.aten.relu %3444 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3446 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1125 = torch.constant.int 12
%3448 = torch.aten.item %3446 : !torch.vtensor<[],f32> -> !torch.float
%3449 = torch.aten.item %3447 : !torch.vtensor<[],si8> -> !torch.int
%3450 = torch.aten.quantize_per_tensor %3445, %3448, %3449, %int12_1125 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3451 = torch.aten.int_repr %3450 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%3452 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3454 = torch.aten.item %3452 : !torch.vtensor<[],f32> -> !torch.float
%3455 = torch.aten.item %3453 : !torch.vtensor<[],si8> -> !torch.int
%3456 = torch.aten._make_per_tensor_quantized_tensor %3451, %3454, %3455 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3457 = torch.aten.dequantize.self %3456 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%3458 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1126 = torch.constant.int 12
%3460 = torch.aten.item %3458 : !torch.vtensor<[],f32> -> !torch.float
%3461 = torch.aten.item %3459 : !torch.vtensor<[],si8> -> !torch.int
%3462 = torch.aten.quantize_per_tensor %94, %3460, %3461, %int12_1126 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3463 = torch.aten.int_repr %3462 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3464 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3466 = torch.aten.item %3464 : !torch.vtensor<[],f32> -> !torch.float
%3467 = torch.aten.item %3465 : !torch.vtensor<[],si8> -> !torch.int
%3468 = torch.aten._make_per_tensor_quantized_tensor %3463, %3466, %3467 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3469 = torch.aten.dequantize.self %3468 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3470 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1127 = torch.constant.int 12
%3472 = torch.aten.item %3470 : !torch.vtensor<[],f32> -> !torch.float
%3473 = torch.aten.item %3471 : !torch.vtensor<[],si8> -> !torch.int
%3474 = torch.aten.quantize_per_tensor %95, %3472, %3473, %int12_1127 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3475 = torch.aten.int_repr %3474 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3476 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3478 = torch.aten.item %3476 : !torch.vtensor<[],f32> -> !torch.float
%3479 = torch.aten.item %3477 : !torch.vtensor<[],si8> -> !torch.int
%3480 = torch.aten._make_per_tensor_quantized_tensor %3475, %3478, %3479 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3481 = torch.aten.dequantize.self %3480 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1128 = torch.constant.int 4
%int4_1129 = torch.constant.int 4
%int4_1130 = torch.constant.int 4
%int4_1131 = torch.constant.int 4
%int1_1132 = torch.constant.int 1
%int1_1133 = torch.constant.int 1
%int0_1134 = torch.constant.int 0
%3482 = torch.prim.ListConstruct %int4_1128, %int4_1129 : (!torch.int, !torch.int) -> !torch.list<int>
%3483 = torch.prim.ListConstruct %int4_1130, %int4_1131 : (!torch.int, !torch.int) -> !torch.list<int>
%3484 = torch.prim.ListConstruct %int1_1132, %int1_1133 : (!torch.int, !torch.int) -> !torch.list<int>
%3485 = torch.prim.ListConstruct %int0_1134, %int0_1134 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1135 = torch.constant.bool false
%int1_1136 = torch.constant.int 1
%3486 = torch.aten.convolution %3457, %3469, %3481, %3484, %3482, %3483, %false_1135, %3485, %int1_1136 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3487 = torch.aten.relu %3486 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3488 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3489 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1137 = torch.constant.int 12
%3490 = torch.aten.item %3488 : !torch.vtensor<[],f32> -> !torch.float
%3491 = torch.aten.item %3489 : !torch.vtensor<[],si8> -> !torch.int
%3492 = torch.aten.quantize_per_tensor %3487, %3490, %3491, %int12_1137 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3493 = torch.aten.int_repr %3492 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%3494 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3495 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3496 = torch.aten.item %3494 : !torch.vtensor<[],f32> -> !torch.float
%3497 = torch.aten.item %3495 : !torch.vtensor<[],si8> -> !torch.int
%3498 = torch.aten._make_per_tensor_quantized_tensor %3493, %3496, %3497 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%3499 = torch.aten.dequantize.self %3498 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%3500 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3501 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1138 = torch.constant.int 12
%3502 = torch.aten.item %3500 : !torch.vtensor<[],f32> -> !torch.float
%3503 = torch.aten.item %3501 : !torch.vtensor<[],si8> -> !torch.int
%3504 = torch.aten.quantize_per_tensor %96, %3502, %3503, %int12_1138 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3505 = torch.aten.int_repr %3504 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3506 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3507 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3508 = torch.aten.item %3506 : !torch.vtensor<[],f32> -> !torch.float
%3509 = torch.aten.item %3507 : !torch.vtensor<[],si8> -> !torch.int
%3510 = torch.aten._make_per_tensor_quantized_tensor %3505, %3508, %3509 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3511 = torch.aten.dequantize.self %3510 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3512 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1139 = torch.constant.int 12
%3514 = torch.aten.item %3512 : !torch.vtensor<[],f32> -> !torch.float
%3515 = torch.aten.item %3513 : !torch.vtensor<[],si8> -> !torch.int
%3516 = torch.aten.quantize_per_tensor %97, %3514, %3515, %int12_1139 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3517 = torch.aten.int_repr %3516 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3518 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3520 = torch.aten.item %3518 : !torch.vtensor<[],f32> -> !torch.float
%3521 = torch.aten.item %3519 : !torch.vtensor<[],si8> -> !torch.int
%3522 = torch.aten._make_per_tensor_quantized_tensor %3517, %3520, %3521 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3523 = torch.aten.dequantize.self %3522 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int8 = torch.constant.int 8
%int8_1140 = torch.constant.int 8
%int8_1141 = torch.constant.int 8
%int8_1142 = torch.constant.int 8
%int1_1143 = torch.constant.int 1
%int1_1144 = torch.constant.int 1
%int0_1145 = torch.constant.int 0
%3524 = torch.prim.ListConstruct %int8, %int8_1140 : (!torch.int, !torch.int) -> !torch.list<int>
%3525 = torch.prim.ListConstruct %int8_1141, %int8_1142 : (!torch.int, !torch.int) -> !torch.list<int>
%3526 = torch.prim.ListConstruct %int1_1143, %int1_1144 : (!torch.int, !torch.int) -> !torch.list<int>
%3527 = torch.prim.ListConstruct %int0_1145, %int0_1145 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1146 = torch.constant.bool false
%int1_1147 = torch.constant.int 1
%3528 = torch.aten.convolution %3499, %3511, %3523, %3526, %3524, %3525, %false_1146, %3527, %int1_1147 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3529 = torch.aten.relu %3528 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3530 = torch.prim.ListConstruct %3529, %3499 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1148 = torch.constant.int 1
%3531 = torch.aten.cat %3530, %int1_1148 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3532 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3533 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1149 = torch.constant.int 12
%3534 = torch.aten.item %3532 : !torch.vtensor<[],f32> -> !torch.float
%3535 = torch.aten.item %3533 : !torch.vtensor<[],si8> -> !torch.int
%3536 = torch.aten.quantize_per_tensor %3531, %3534, %3535, %int12_1149 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3537 = torch.aten.int_repr %3536 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3538 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3539 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3540 = torch.aten.item %3538 : !torch.vtensor<[],f32> -> !torch.float
%3541 = torch.aten.item %3539 : !torch.vtensor<[],si8> -> !torch.int
%3542 = torch.aten._make_per_tensor_quantized_tensor %3537, %3540, %3541 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3543 = torch.aten.dequantize.self %3542 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%3544 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3545 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1150 = torch.constant.int 12
%3546 = torch.aten.item %3544 : !torch.vtensor<[],f32> -> !torch.float
%3547 = torch.aten.item %3545 : !torch.vtensor<[],si8> -> !torch.int
%3548 = torch.aten.quantize_per_tensor %98, %3546, %3547, %int12_1150 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3549 = torch.aten.int_repr %3548 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3550 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3551 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3552 = torch.aten.item %3550 : !torch.vtensor<[],f32> -> !torch.float
%3553 = torch.aten.item %3551 : !torch.vtensor<[],si8> -> !torch.int
%3554 = torch.aten._make_per_tensor_quantized_tensor %3549, %3552, %3553 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3555 = torch.aten.dequantize.self %3554 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3556 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3557 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1151 = torch.constant.int 12
%3558 = torch.aten.item %3556 : !torch.vtensor<[],f32> -> !torch.float
%3559 = torch.aten.item %3557 : !torch.vtensor<[],si8> -> !torch.int
%3560 = torch.aten.quantize_per_tensor %99, %3558, %3559, %int12_1151 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3561 = torch.aten.int_repr %3560 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3562 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3563 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3564 = torch.aten.item %3562 : !torch.vtensor<[],f32> -> !torch.float
%3565 = torch.aten.item %3563 : !torch.vtensor<[],si8> -> !torch.int
%3566 = torch.aten._make_per_tensor_quantized_tensor %3561, %3564, %3565 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3567 = torch.aten.dequantize.self %3566 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1152 = torch.constant.int 4
%int4_1153 = torch.constant.int 4
%int4_1154 = torch.constant.int 4
%int4_1155 = torch.constant.int 4
%int1_1156 = torch.constant.int 1
%int1_1157 = torch.constant.int 1
%int0_1158 = torch.constant.int 0
%3568 = torch.prim.ListConstruct %int4_1152, %int4_1153 : (!torch.int, !torch.int) -> !torch.list<int>
%3569 = torch.prim.ListConstruct %int4_1154, %int4_1155 : (!torch.int, !torch.int) -> !torch.list<int>
%3570 = torch.prim.ListConstruct %int1_1156, %int1_1157 : (!torch.int, !torch.int) -> !torch.list<int>
%3571 = torch.prim.ListConstruct %int0_1158, %int0_1158 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1159 = torch.constant.bool false
%int1_1160 = torch.constant.int 1
%3572 = torch.aten.convolution %3543, %3555, %3567, %3570, %3568, %3569, %false_1159, %3571, %int1_1160 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3573 = torch.aten.relu %3572 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3574 = torch.prim.ListConstruct %3573, %3457 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1161 = torch.constant.int 1
%3575 = torch.aten.cat %3574, %int1_1161 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3576 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3577 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1162 = torch.constant.int 12
%3578 = torch.aten.item %3576 : !torch.vtensor<[],f32> -> !torch.float
%3579 = torch.aten.item %3577 : !torch.vtensor<[],si8> -> !torch.int
%3580 = torch.aten.quantize_per_tensor %3575, %3578, %3579, %int12_1162 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3581 = torch.aten.int_repr %3580 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3582 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3583 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3584 = torch.aten.item %3582 : !torch.vtensor<[],f32> -> !torch.float
%3585 = torch.aten.item %3583 : !torch.vtensor<[],si8> -> !torch.int
%3586 = torch.aten._make_per_tensor_quantized_tensor %3581, %3584, %3585 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3587 = torch.aten.dequantize.self %3586 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%3588 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3589 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1163 = torch.constant.int 12
%3590 = torch.aten.item %3588 : !torch.vtensor<[],f32> -> !torch.float
%3591 = torch.aten.item %3589 : !torch.vtensor<[],si8> -> !torch.int
%3592 = torch.aten.quantize_per_tensor %100, %3590, %3591, %int12_1163 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3593 = torch.aten.int_repr %3592 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3594 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3595 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3596 = torch.aten.item %3594 : !torch.vtensor<[],f32> -> !torch.float
%3597 = torch.aten.item %3595 : !torch.vtensor<[],si8> -> !torch.int
%3598 = torch.aten._make_per_tensor_quantized_tensor %3593, %3596, %3597 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3599 = torch.aten.dequantize.self %3598 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3600 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3601 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1164 = torch.constant.int 12
%3602 = torch.aten.item %3600 : !torch.vtensor<[],f32> -> !torch.float
%3603 = torch.aten.item %3601 : !torch.vtensor<[],si8> -> !torch.int
%3604 = torch.aten.quantize_per_tensor %101, %3602, %3603, %int12_1164 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3605 = torch.aten.int_repr %3604 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3606 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3607 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3608 = torch.aten.item %3606 : !torch.vtensor<[],f32> -> !torch.float
%3609 = torch.aten.item %3607 : !torch.vtensor<[],si8> -> !torch.int
%3610 = torch.aten._make_per_tensor_quantized_tensor %3605, %3608, %3609 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3611 = torch.aten.dequantize.self %3610 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1165 = torch.constant.int 2
%int2_1166 = torch.constant.int 2
%int2_1167 = torch.constant.int 2
%int2_1168 = torch.constant.int 2
%int1_1169 = torch.constant.int 1
%int1_1170 = torch.constant.int 1
%int0_1171 = torch.constant.int 0
%3612 = torch.prim.ListConstruct %int2_1165, %int2_1166 : (!torch.int, !torch.int) -> !torch.list<int>
%3613 = torch.prim.ListConstruct %int2_1167, %int2_1168 : (!torch.int, !torch.int) -> !torch.list<int>
%3614 = torch.prim.ListConstruct %int1_1169, %int1_1170 : (!torch.int, !torch.int) -> !torch.list<int>
%3615 = torch.prim.ListConstruct %int0_1171, %int0_1171 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1172 = torch.constant.bool false
%int1_1173 = torch.constant.int 1
%3616 = torch.aten.convolution %3587, %3599, %3611, %3614, %3612, %3613, %false_1172, %3615, %int1_1173 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%3617 = torch.aten.relu %3616 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%3618 = torch.prim.ListConstruct %3617, %3415 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1174 = torch.constant.int 1
%3619 = torch.aten.cat %3618, %int1_1174 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3620 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3621 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1175 = torch.constant.int 12
%3622 = torch.aten.item %3620 : !torch.vtensor<[],f32> -> !torch.float
%3623 = torch.aten.item %3621 : !torch.vtensor<[],si8> -> !torch.int
%3624 = torch.aten.quantize_per_tensor %3619, %3622, %3623, %int12_1175 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3625 = torch.aten.int_repr %3624 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3626 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3628 = torch.aten.item %3626 : !torch.vtensor<[],f32> -> !torch.float
%3629 = torch.aten.item %3627 : !torch.vtensor<[],si8> -> !torch.int
%3630 = torch.aten._make_per_tensor_quantized_tensor %3625, %3628, %3629 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3631 = torch.aten.dequantize.self %3630 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%3632 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1176 = torch.constant.int 12
%3634 = torch.aten.item %3632 : !torch.vtensor<[],f32> -> !torch.float
%3635 = torch.aten.item %3633 : !torch.vtensor<[],si8> -> !torch.int
%3636 = torch.aten.quantize_per_tensor %102, %3634, %3635, %int12_1176 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3637 = torch.aten.int_repr %3636 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%3638 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3639 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3640 = torch.aten.item %3638 : !torch.vtensor<[],f32> -> !torch.float
%3641 = torch.aten.item %3639 : !torch.vtensor<[],si8> -> !torch.int
%3642 = torch.aten._make_per_tensor_quantized_tensor %3637, %3640, %3641 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3643 = torch.aten.dequantize.self %3642 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%3644 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3645 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1177 = torch.constant.int 12
%3646 = torch.aten.item %3644 : !torch.vtensor<[],f32> -> !torch.float
%3647 = torch.aten.item %3645 : !torch.vtensor<[],si8> -> !torch.int
%3648 = torch.aten.quantize_per_tensor %103, %3646, %3647, %int12_1177 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3649 = torch.aten.int_repr %3648 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%3650 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3651 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3652 = torch.aten.item %3650 : !torch.vtensor<[],f32> -> !torch.float
%3653 = torch.aten.item %3651 : !torch.vtensor<[],si8> -> !torch.int
%3654 = torch.aten._make_per_tensor_quantized_tensor %3649, %3652, %3653 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3655 = torch.aten.dequantize.self %3654 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1178 = torch.constant.int 1
%int1_1179 = torch.constant.int 1
%int1_1180 = torch.constant.int 1
%int1_1181 = torch.constant.int 1
%int1_1182 = torch.constant.int 1
%int1_1183 = torch.constant.int 1
%int0_1184 = torch.constant.int 0
%3656 = torch.prim.ListConstruct %int1_1178, %int1_1179 : (!torch.int, !torch.int) -> !torch.list<int>
%3657 = torch.prim.ListConstruct %int1_1180, %int1_1181 : (!torch.int, !torch.int) -> !torch.list<int>
%3658 = torch.prim.ListConstruct %int1_1182, %int1_1183 : (!torch.int, !torch.int) -> !torch.list<int>
%3659 = torch.prim.ListConstruct %int0_1184, %int0_1184 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1185 = torch.constant.bool false
%int1_1186 = torch.constant.int 1
%3660 = torch.aten.convolution %3631, %3643, %3655, %3658, %3656, %3657, %false_1185, %3659, %int1_1186 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3661 = torch.aten.relu %3660 : !torch.vtensor<[1,512,20,20],f32> -> !torch.vtensor<[1,512,20,20],f32>
%3662 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3663 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1187 = torch.constant.int 12
%3664 = torch.aten.item %3662 : !torch.vtensor<[],f32> -> !torch.float
%3665 = torch.aten.item %3663 : !torch.vtensor<[],si8> -> !torch.int
%3666 = torch.aten.quantize_per_tensor %3661, %3664, %3665, %int12_1187 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3667 = torch.aten.int_repr %3666 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3668 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3669 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3670 = torch.aten.item %3668 : !torch.vtensor<[],f32> -> !torch.float
%3671 = torch.aten.item %3669 : !torch.vtensor<[],si8> -> !torch.int
%3672 = torch.aten._make_per_tensor_quantized_tensor %3667, %3670, %3671 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3673 = torch.aten.dequantize.self %3672 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%int1_1188 = torch.constant.int 1
%3674 = torch.aten.add.Tensor %3673, %3373, %int1_1188 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[1,512,20,20],f32>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%3675 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1189 = torch.constant.int 12
%3677 = torch.aten.item %3675 : !torch.vtensor<[],f32> -> !torch.float
%3678 = torch.aten.item %3676 : !torch.vtensor<[],si8> -> !torch.int
%3679 = torch.aten.quantize_per_tensor %3674, %3677, %3678, %int12_1189 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3680 = torch.aten.int_repr %3679 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%3681 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3682 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3683 = torch.aten.item %3681 : !torch.vtensor<[],f32> -> !torch.float
%3684 = torch.aten.item %3682 : !torch.vtensor<[],si8> -> !torch.int
%3685 = torch.aten._make_per_tensor_quantized_tensor %3680, %3683, %3684 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%3686 = torch.aten.dequantize.self %3685 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%int2_1190 = torch.constant.int 2
%int2_1191 = torch.constant.int 2
%3687 = torch.prim.ListConstruct %int2_1190, %int2_1191 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1192 = torch.constant.int 0
%int0_1193 = torch.constant.int 0
%3688 = torch.prim.ListConstruct %int0_1192, %int0_1193 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1194 = torch.constant.int 2
%int2_1195 = torch.constant.int 2
%3689 = torch.prim.ListConstruct %int2_1194, %int2_1195 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1196 = torch.constant.int 1
%int1_1197 = torch.constant.int 1
%3690 = torch.prim.ListConstruct %int1_1196, %int1_1197 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1198 = torch.constant.bool true
%3691 = torch.aten.max_pool2d %3686, %3687, %3689, %3688, %3690, %true_1198 : !torch.vtensor<[1,512,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,512,10,10],f32>
%3692 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3693 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1199 = torch.constant.int 12
%3694 = torch.aten.item %3692 : !torch.vtensor<[],f32> -> !torch.float
%3695 = torch.aten.item %3693 : !torch.vtensor<[],si8> -> !torch.int
%3696 = torch.aten.quantize_per_tensor %3691, %3694, %3695, %int12_1199 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3697 = torch.aten.int_repr %3696 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%3698 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3699 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3700 = torch.aten.item %3698 : !torch.vtensor<[],f32> -> !torch.float
%3701 = torch.aten.item %3699 : !torch.vtensor<[],si8> -> !torch.int
%3702 = torch.aten._make_per_tensor_quantized_tensor %3697, %3700, %3701 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3703 = torch.aten.dequantize.self %3702 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%3704 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3705 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1200 = torch.constant.int 12
%3706 = torch.aten.item %3704 : !torch.vtensor<[],f32> -> !torch.float
%3707 = torch.aten.item %3705 : !torch.vtensor<[],si8> -> !torch.int
%3708 = torch.aten.quantize_per_tensor %104, %3706, %3707, %int12_1200 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3709 = torch.aten.int_repr %3708 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%3710 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3712 = torch.aten.item %3710 : !torch.vtensor<[],f32> -> !torch.float
%3713 = torch.aten.item %3711 : !torch.vtensor<[],si8> -> !torch.int
%3714 = torch.aten._make_per_tensor_quantized_tensor %3709, %3712, %3713 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%3715 = torch.aten.dequantize.self %3714 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%3716 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3717 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1201 = torch.constant.int 12
%3718 = torch.aten.item %3716 : !torch.vtensor<[],f32> -> !torch.float
%3719 = torch.aten.item %3717 : !torch.vtensor<[],si8> -> !torch.int
%3720 = torch.aten.quantize_per_tensor %105, %3718, %3719, %int12_1201 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3721 = torch.aten.int_repr %3720 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%3722 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3724 = torch.aten.item %3722 : !torch.vtensor<[],f32> -> !torch.float
%3725 = torch.aten.item %3723 : !torch.vtensor<[],si8> -> !torch.int
%3726 = torch.aten._make_per_tensor_quantized_tensor %3721, %3724, %3725 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%3727 = torch.aten.dequantize.self %3726 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1202 = torch.constant.int 1
%int1_1203 = torch.constant.int 1
%int1_1204 = torch.constant.int 1
%int1_1205 = torch.constant.int 1
%int1_1206 = torch.constant.int 1
%int1_1207 = torch.constant.int 1
%int0_1208 = torch.constant.int 0
%3728 = torch.prim.ListConstruct %int1_1202, %int1_1203 : (!torch.int, !torch.int) -> !torch.list<int>
%3729 = torch.prim.ListConstruct %int1_1204, %int1_1205 : (!torch.int, !torch.int) -> !torch.list<int>
%3730 = torch.prim.ListConstruct %int1_1206, %int1_1207 : (!torch.int, !torch.int) -> !torch.list<int>
%3731 = torch.prim.ListConstruct %int0_1208, %int0_1208 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1209 = torch.constant.bool false
%int1_1210 = torch.constant.int 1
%3732 = torch.aten.convolution %3703, %3715, %3727, %3730, %3728, %3729, %false_1209, %3731, %int1_1210 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%3733 = torch.aten.relu %3732 : !torch.vtensor<[1,512,10,10],f32> -> !torch.vtensor<[1,512,10,10],f32>
%3734 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1211 = torch.constant.int 12
%3736 = torch.aten.item %3734 : !torch.vtensor<[],f32> -> !torch.float
%3737 = torch.aten.item %3735 : !torch.vtensor<[],si8> -> !torch.int
%3738 = torch.aten.quantize_per_tensor %3733, %3736, %3737, %int12_1211 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3739 = torch.aten.int_repr %3738 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%3740 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3742 = torch.aten.item %3740 : !torch.vtensor<[],f32> -> !torch.float
%3743 = torch.aten.item %3741 : !torch.vtensor<[],si8> -> !torch.int
%3744 = torch.aten._make_per_tensor_quantized_tensor %3739, %3742, %3743 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3745 = torch.aten.dequantize.self %3744 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%3746 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1212 = torch.constant.int 12
%3748 = torch.aten.item %3746 : !torch.vtensor<[],f32> -> !torch.float
%3749 = torch.aten.item %3747 : !torch.vtensor<[],si8> -> !torch.int
%3750 = torch.aten.quantize_per_tensor %106, %3748, %3749, %int12_1212 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3751 = torch.aten.int_repr %3750 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3752 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3754 = torch.aten.item %3752 : !torch.vtensor<[],f32> -> !torch.float
%3755 = torch.aten.item %3753 : !torch.vtensor<[],si8> -> !torch.int
%3756 = torch.aten._make_per_tensor_quantized_tensor %3751, %3754, %3755 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3757 = torch.aten.dequantize.self %3756 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3758 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3759 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1213 = torch.constant.int 12
%3760 = torch.aten.item %3758 : !torch.vtensor<[],f32> -> !torch.float
%3761 = torch.aten.item %3759 : !torch.vtensor<[],si8> -> !torch.int
%3762 = torch.aten.quantize_per_tensor %107, %3760, %3761, %int12_1213 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3763 = torch.aten.int_repr %3762 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3764 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3765 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3766 = torch.aten.item %3764 : !torch.vtensor<[],f32> -> !torch.float
%3767 = torch.aten.item %3765 : !torch.vtensor<[],si8> -> !torch.int
%3768 = torch.aten._make_per_tensor_quantized_tensor %3763, %3766, %3767 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3769 = torch.aten.dequantize.self %3768 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_1214 = torch.constant.int 1
%int1_1215 = torch.constant.int 1
%int1_1216 = torch.constant.int 1
%int1_1217 = torch.constant.int 1
%int1_1218 = torch.constant.int 1
%int1_1219 = torch.constant.int 1
%int0_1220 = torch.constant.int 0
%3770 = torch.prim.ListConstruct %int1_1214, %int1_1215 : (!torch.int, !torch.int) -> !torch.list<int>
%3771 = torch.prim.ListConstruct %int1_1216, %int1_1217 : (!torch.int, !torch.int) -> !torch.list<int>
%3772 = torch.prim.ListConstruct %int1_1218, %int1_1219 : (!torch.int, !torch.int) -> !torch.list<int>
%3773 = torch.prim.ListConstruct %int0_1220, %int0_1220 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1221 = torch.constant.bool false
%int1_1222 = torch.constant.int 1
%3774 = torch.aten.convolution %3745, %3757, %3769, %3772, %3770, %3771, %false_1221, %3773, %int1_1222 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3775 = torch.aten.relu %3774 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3776 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3777 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1223 = torch.constant.int 12
%3778 = torch.aten.item %3776 : !torch.vtensor<[],f32> -> !torch.float
%3779 = torch.aten.item %3777 : !torch.vtensor<[],si8> -> !torch.int
%3780 = torch.aten.quantize_per_tensor %3775, %3778, %3779, %int12_1223 : !torch.vtensor<[1,256,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3781 = torch.aten.int_repr %3780 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],si8>
%3782 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3784 = torch.aten.item %3782 : !torch.vtensor<[],f32> -> !torch.float
%3785 = torch.aten.item %3783 : !torch.vtensor<[],si8> -> !torch.int
%3786 = torch.aten._make_per_tensor_quantized_tensor %3781, %3784, %3785 : !torch.vtensor<[1,256,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3787 = torch.aten.dequantize.self %3786 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],f32>
%3788 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1224 = torch.constant.int 12
%3790 = torch.aten.item %3788 : !torch.vtensor<[],f32> -> !torch.float
%3791 = torch.aten.item %3789 : !torch.vtensor<[],si8> -> !torch.int
%3792 = torch.aten.quantize_per_tensor %108, %3790, %3791, %int12_1224 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3793 = torch.aten.int_repr %3792 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3794 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3796 = torch.aten.item %3794 : !torch.vtensor<[],f32> -> !torch.float
%3797 = torch.aten.item %3795 : !torch.vtensor<[],si8> -> !torch.int
%3798 = torch.aten._make_per_tensor_quantized_tensor %3793, %3796, %3797 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3799 = torch.aten.dequantize.self %3798 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3800 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1225 = torch.constant.int 12
%3802 = torch.aten.item %3800 : !torch.vtensor<[],f32> -> !torch.float
%3803 = torch.aten.item %3801 : !torch.vtensor<[],si8> -> !torch.int
%3804 = torch.aten.quantize_per_tensor %109, %3802, %3803, %int12_1225 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3805 = torch.aten.int_repr %3804 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3806 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3807 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3808 = torch.aten.item %3806 : !torch.vtensor<[],f32> -> !torch.float
%3809 = torch.aten.item %3807 : !torch.vtensor<[],si8> -> !torch.int
%3810 = torch.aten._make_per_tensor_quantized_tensor %3805, %3808, %3809 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3811 = torch.aten.dequantize.self %3810 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1226 = torch.constant.int 2
%int2_1227 = torch.constant.int 2
%int2_1228 = torch.constant.int 2
%int2_1229 = torch.constant.int 2
%int1_1230 = torch.constant.int 1
%int1_1231 = torch.constant.int 1
%int0_1232 = torch.constant.int 0
%3812 = torch.prim.ListConstruct %int2_1226, %int2_1227 : (!torch.int, !torch.int) -> !torch.list<int>
%3813 = torch.prim.ListConstruct %int2_1228, %int2_1229 : (!torch.int, !torch.int) -> !torch.list<int>
%3814 = torch.prim.ListConstruct %int1_1230, %int1_1231 : (!torch.int, !torch.int) -> !torch.list<int>
%3815 = torch.prim.ListConstruct %int0_1232, %int0_1232 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1233 = torch.constant.bool false
%int1_1234 = torch.constant.int 1
%3816 = torch.aten.convolution %3787, %3799, %3811, %3814, %3812, %3813, %false_1233, %3815, %int1_1234 : !torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3817 = torch.aten.relu %3816 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3818 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3819 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1235 = torch.constant.int 12
%3820 = torch.aten.item %3818 : !torch.vtensor<[],f32> -> !torch.float
%3821 = torch.aten.item %3819 : !torch.vtensor<[],si8> -> !torch.int
%3822 = torch.aten.quantize_per_tensor %3817, %3820, %3821, %int12_1235 : !torch.vtensor<[1,256,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3823 = torch.aten.int_repr %3822 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],si8>
%3824 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3825 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3826 = torch.aten.item %3824 : !torch.vtensor<[],f32> -> !torch.float
%3827 = torch.aten.item %3825 : !torch.vtensor<[],si8> -> !torch.int
%3828 = torch.aten._make_per_tensor_quantized_tensor %3823, %3826, %3827 : !torch.vtensor<[1,256,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3829 = torch.aten.dequantize.self %3828 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],f32>
%3830 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3831 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1236 = torch.constant.int 12
%3832 = torch.aten.item %3830 : !torch.vtensor<[],f32> -> !torch.float
%3833 = torch.aten.item %3831 : !torch.vtensor<[],si8> -> !torch.int
%3834 = torch.aten.quantize_per_tensor %110, %3832, %3833, %int12_1236 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3835 = torch.aten.int_repr %3834 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3836 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%3837 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3838 = torch.aten.item %3836 : !torch.vtensor<[],f32> -> !torch.float
%3839 = torch.aten.item %3837 : !torch.vtensor<[],si8> -> !torch.int
%3840 = torch.aten._make_per_tensor_quantized_tensor %3835, %3838, %3839 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3841 = torch.aten.dequantize.self %3840 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3842 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3843 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1237 = torch.constant.int 12
%3844 = torch.aten.item %3842 : !torch.vtensor<[],f32> -> !torch.float
%3845 = torch.aten.item %3843 : !torch.vtensor<[],si8> -> !torch.int
%3846 = torch.aten.quantize_per_tensor %111, %3844, %3845, %int12_1237 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3847 = torch.aten.int_repr %3846 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3848 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3849 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3850 = torch.aten.item %3848 : !torch.vtensor<[],f32> -> !torch.float
%3851 = torch.aten.item %3849 : !torch.vtensor<[],si8> -> !torch.int
%3852 = torch.aten._make_per_tensor_quantized_tensor %3847, %3850, %3851 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3853 = torch.aten.dequantize.self %3852 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1238 = torch.constant.int 4
%int4_1239 = torch.constant.int 4
%int4_1240 = torch.constant.int 4
%int4_1241 = torch.constant.int 4
%int1_1242 = torch.constant.int 1
%int1_1243 = torch.constant.int 1
%int0_1244 = torch.constant.int 0
%3854 = torch.prim.ListConstruct %int4_1238, %int4_1239 : (!torch.int, !torch.int) -> !torch.list<int>
%3855 = torch.prim.ListConstruct %int4_1240, %int4_1241 : (!torch.int, !torch.int) -> !torch.list<int>
%3856 = torch.prim.ListConstruct %int1_1242, %int1_1243 : (!torch.int, !torch.int) -> !torch.list<int>
%3857 = torch.prim.ListConstruct %int0_1244, %int0_1244 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1245 = torch.constant.bool false
%int1_1246 = torch.constant.int 1
%3858 = torch.aten.convolution %3829, %3841, %3853, %3856, %3854, %3855, %false_1245, %3857, %int1_1246 : !torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3859 = torch.aten.relu %3858 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3860 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3861 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1247 = torch.constant.int 12
%3862 = torch.aten.item %3860 : !torch.vtensor<[],f32> -> !torch.float
%3863 = torch.aten.item %3861 : !torch.vtensor<[],si8> -> !torch.int
%3864 = torch.aten.quantize_per_tensor %3859, %3862, %3863, %int12_1247 : !torch.vtensor<[1,256,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3865 = torch.aten.int_repr %3864 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],si8>
%3866 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3867 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3868 = torch.aten.item %3866 : !torch.vtensor<[],f32> -> !torch.float
%3869 = torch.aten.item %3867 : !torch.vtensor<[],si8> -> !torch.int
%3870 = torch.aten._make_per_tensor_quantized_tensor %3865, %3868, %3869 : !torch.vtensor<[1,256,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%3871 = torch.aten.dequantize.self %3870 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],f32>
%3872 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3873 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1248 = torch.constant.int 12
%3874 = torch.aten.item %3872 : !torch.vtensor<[],f32> -> !torch.float
%3875 = torch.aten.item %3873 : !torch.vtensor<[],si8> -> !torch.int
%3876 = torch.aten.quantize_per_tensor %112, %3874, %3875, %int12_1248 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3877 = torch.aten.int_repr %3876 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%3878 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3879 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3880 = torch.aten.item %3878 : !torch.vtensor<[],f32> -> !torch.float
%3881 = torch.aten.item %3879 : !torch.vtensor<[],si8> -> !torch.int
%3882 = torch.aten._make_per_tensor_quantized_tensor %3877, %3880, %3881 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%3883 = torch.aten.dequantize.self %3882 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%3884 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3885 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1249 = torch.constant.int 12
%3886 = torch.aten.item %3884 : !torch.vtensor<[],f32> -> !torch.float
%3887 = torch.aten.item %3885 : !torch.vtensor<[],si8> -> !torch.int
%3888 = torch.aten.quantize_per_tensor %113, %3886, %3887, %int12_1249 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3889 = torch.aten.int_repr %3888 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3890 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3891 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3892 = torch.aten.item %3890 : !torch.vtensor<[],f32> -> !torch.float
%3893 = torch.aten.item %3891 : !torch.vtensor<[],si8> -> !torch.int
%3894 = torch.aten._make_per_tensor_quantized_tensor %3889, %3892, %3893 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3895 = torch.aten.dequantize.self %3894 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int8_1250 = torch.constant.int 8
%int8_1251 = torch.constant.int 8
%int8_1252 = torch.constant.int 8
%int8_1253 = torch.constant.int 8
%int1_1254 = torch.constant.int 1
%int1_1255 = torch.constant.int 1
%int0_1256 = torch.constant.int 0
%3896 = torch.prim.ListConstruct %int8_1250, %int8_1251 : (!torch.int, !torch.int) -> !torch.list<int>
%3897 = torch.prim.ListConstruct %int8_1252, %int8_1253 : (!torch.int, !torch.int) -> !torch.list<int>
%3898 = torch.prim.ListConstruct %int1_1254, %int1_1255 : (!torch.int, !torch.int) -> !torch.list<int>
%3899 = torch.prim.ListConstruct %int0_1256, %int0_1256 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1257 = torch.constant.bool false
%int1_1258 = torch.constant.int 1
%3900 = torch.aten.convolution %3871, %3883, %3895, %3898, %3896, %3897, %false_1257, %3899, %int1_1258 : !torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3901 = torch.aten.relu %3900 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3902 = torch.prim.ListConstruct %3901, %3871 : (!torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[1,256,10,10],f32>) -> !torch.list<vtensor>
%int1_1259 = torch.constant.int 1
%3903 = torch.aten.cat %3902, %int1_1259 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%3904 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3905 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1260 = torch.constant.int 12
%3906 = torch.aten.item %3904 : !torch.vtensor<[],f32> -> !torch.float
%3907 = torch.aten.item %3905 : !torch.vtensor<[],si8> -> !torch.int
%3908 = torch.aten.quantize_per_tensor %3903, %3906, %3907, %int12_1260 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3909 = torch.aten.int_repr %3908 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%3910 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3911 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3912 = torch.aten.item %3910 : !torch.vtensor<[],f32> -> !torch.float
%3913 = torch.aten.item %3911 : !torch.vtensor<[],si8> -> !torch.int
%3914 = torch.aten._make_per_tensor_quantized_tensor %3909, %3912, %3913 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3915 = torch.aten.dequantize.self %3914 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%3916 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3917 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1261 = torch.constant.int 12
%3918 = torch.aten.item %3916 : !torch.vtensor<[],f32> -> !torch.float
%3919 = torch.aten.item %3917 : !torch.vtensor<[],si8> -> !torch.int
%3920 = torch.aten.quantize_per_tensor %114, %3918, %3919, %int12_1261 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3921 = torch.aten.int_repr %3920 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3922 = torch.vtensor.literal(dense<2.44140625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3923 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3924 = torch.aten.item %3922 : !torch.vtensor<[],f32> -> !torch.float
%3925 = torch.aten.item %3923 : !torch.vtensor<[],si8> -> !torch.int
%3926 = torch.aten._make_per_tensor_quantized_tensor %3921, %3924, %3925 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3927 = torch.aten.dequantize.self %3926 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3928 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3929 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1262 = torch.constant.int 12
%3930 = torch.aten.item %3928 : !torch.vtensor<[],f32> -> !torch.float
%3931 = torch.aten.item %3929 : !torch.vtensor<[],si8> -> !torch.int
%3932 = torch.aten.quantize_per_tensor %115, %3930, %3931, %int12_1262 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3933 = torch.aten.int_repr %3932 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3934 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3935 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3936 = torch.aten.item %3934 : !torch.vtensor<[],f32> -> !torch.float
%3937 = torch.aten.item %3935 : !torch.vtensor<[],si8> -> !torch.int
%3938 = torch.aten._make_per_tensor_quantized_tensor %3933, %3936, %3937 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3939 = torch.aten.dequantize.self %3938 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1263 = torch.constant.int 4
%int4_1264 = torch.constant.int 4
%int4_1265 = torch.constant.int 4
%int4_1266 = torch.constant.int 4
%int1_1267 = torch.constant.int 1
%int1_1268 = torch.constant.int 1
%int0_1269 = torch.constant.int 0
%3940 = torch.prim.ListConstruct %int4_1263, %int4_1264 : (!torch.int, !torch.int) -> !torch.list<int>
%3941 = torch.prim.ListConstruct %int4_1265, %int4_1266 : (!torch.int, !torch.int) -> !torch.list<int>
%3942 = torch.prim.ListConstruct %int1_1267, %int1_1268 : (!torch.int, !torch.int) -> !torch.list<int>
%3943 = torch.prim.ListConstruct %int0_1269, %int0_1269 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1270 = torch.constant.bool false
%int1_1271 = torch.constant.int 1
%3944 = torch.aten.convolution %3915, %3927, %3939, %3942, %3940, %3941, %false_1270, %3943, %int1_1271 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3945 = torch.aten.relu %3944 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3946 = torch.prim.ListConstruct %3945, %3829 : (!torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[1,256,10,10],f32>) -> !torch.list<vtensor>
%int1_1272 = torch.constant.int 1
%3947 = torch.aten.cat %3946, %int1_1272 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%3948 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3949 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1273 = torch.constant.int 12
%3950 = torch.aten.item %3948 : !torch.vtensor<[],f32> -> !torch.float
%3951 = torch.aten.item %3949 : !torch.vtensor<[],si8> -> !torch.int
%3952 = torch.aten.quantize_per_tensor %3947, %3950, %3951, %int12_1273 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3953 = torch.aten.int_repr %3952 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%3954 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3955 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3956 = torch.aten.item %3954 : !torch.vtensor<[],f32> -> !torch.float
%3957 = torch.aten.item %3955 : !torch.vtensor<[],si8> -> !torch.int
%3958 = torch.aten._make_per_tensor_quantized_tensor %3953, %3956, %3957 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3959 = torch.aten.dequantize.self %3958 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%3960 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3961 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1274 = torch.constant.int 12
%3962 = torch.aten.item %3960 : !torch.vtensor<[],f32> -> !torch.float
%3963 = torch.aten.item %3961 : !torch.vtensor<[],si8> -> !torch.int
%3964 = torch.aten.quantize_per_tensor %116, %3962, %3963, %int12_1274 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3965 = torch.aten.int_repr %3964 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%3966 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3967 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3968 = torch.aten.item %3966 : !torch.vtensor<[],f32> -> !torch.float
%3969 = torch.aten.item %3967 : !torch.vtensor<[],si8> -> !torch.int
%3970 = torch.aten._make_per_tensor_quantized_tensor %3965, %3968, %3969 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%3971 = torch.aten.dequantize.self %3970 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%3972 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3973 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1275 = torch.constant.int 12
%3974 = torch.aten.item %3972 : !torch.vtensor<[],f32> -> !torch.float
%3975 = torch.aten.item %3973 : !torch.vtensor<[],si8> -> !torch.int
%3976 = torch.aten.quantize_per_tensor %117, %3974, %3975, %int12_1275 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3977 = torch.aten.int_repr %3976 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%3978 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3979 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3980 = torch.aten.item %3978 : !torch.vtensor<[],f32> -> !torch.float
%3981 = torch.aten.item %3979 : !torch.vtensor<[],si8> -> !torch.int
%3982 = torch.aten._make_per_tensor_quantized_tensor %3977, %3980, %3981 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%3983 = torch.aten.dequantize.self %3982 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1276 = torch.constant.int 2
%int2_1277 = torch.constant.int 2
%int2_1278 = torch.constant.int 2
%int2_1279 = torch.constant.int 2
%int1_1280 = torch.constant.int 1
%int1_1281 = torch.constant.int 1
%int0_1282 = torch.constant.int 0
%3984 = torch.prim.ListConstruct %int2_1276, %int2_1277 : (!torch.int, !torch.int) -> !torch.list<int>
%3985 = torch.prim.ListConstruct %int2_1278, %int2_1279 : (!torch.int, !torch.int) -> !torch.list<int>
%3986 = torch.prim.ListConstruct %int1_1280, %int1_1281 : (!torch.int, !torch.int) -> !torch.list<int>
%3987 = torch.prim.ListConstruct %int0_1282, %int0_1282 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1283 = torch.constant.bool false
%int1_1284 = torch.constant.int 1
%3988 = torch.aten.convolution %3959, %3971, %3983, %3986, %3984, %3985, %false_1283, %3987, %int1_1284 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%3989 = torch.aten.relu %3988 : !torch.vtensor<[1,256,10,10],f32> -> !torch.vtensor<[1,256,10,10],f32>
%3990 = torch.prim.ListConstruct %3989, %3787 : (!torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[1,256,10,10],f32>) -> !torch.list<vtensor>
%int1_1285 = torch.constant.int 1
%3991 = torch.aten.cat %3990, %int1_1285 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%3992 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1286 = torch.constant.int 12
%3994 = torch.aten.item %3992 : !torch.vtensor<[],f32> -> !torch.float
%3995 = torch.aten.item %3993 : !torch.vtensor<[],si8> -> !torch.int
%3996 = torch.aten.quantize_per_tensor %3991, %3994, %3995, %int12_1286 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%3997 = torch.aten.int_repr %3996 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%3998 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3999 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4000 = torch.aten.item %3998 : !torch.vtensor<[],f32> -> !torch.float
%4001 = torch.aten.item %3999 : !torch.vtensor<[],si8> -> !torch.int
%4002 = torch.aten._make_per_tensor_quantized_tensor %3997, %4000, %4001 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%4003 = torch.aten.dequantize.self %4002 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%4004 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4005 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1287 = torch.constant.int 12
%4006 = torch.aten.item %4004 : !torch.vtensor<[],f32> -> !torch.float
%4007 = torch.aten.item %4005 : !torch.vtensor<[],si8> -> !torch.int
%4008 = torch.aten.quantize_per_tensor %118, %4006, %4007, %int12_1287 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%4009 = torch.aten.int_repr %4008 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%4010 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4011 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4012 = torch.aten.item %4010 : !torch.vtensor<[],f32> -> !torch.float
%4013 = torch.aten.item %4011 : !torch.vtensor<[],si8> -> !torch.int
%4014 = torch.aten._make_per_tensor_quantized_tensor %4009, %4012, %4013 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%4015 = torch.aten.dequantize.self %4014 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%4016 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4017 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1288 = torch.constant.int 12
%4018 = torch.aten.item %4016 : !torch.vtensor<[],f32> -> !torch.float
%4019 = torch.aten.item %4017 : !torch.vtensor<[],si8> -> !torch.int
%4020 = torch.aten.quantize_per_tensor %119, %4018, %4019, %int12_1288 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4021 = torch.aten.int_repr %4020 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%4022 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4023 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4024 = torch.aten.item %4022 : !torch.vtensor<[],f32> -> !torch.float
%4025 = torch.aten.item %4023 : !torch.vtensor<[],si8> -> !torch.int
%4026 = torch.aten._make_per_tensor_quantized_tensor %4021, %4024, %4025 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4027 = torch.aten.dequantize.self %4026 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1289 = torch.constant.int 1
%int1_1290 = torch.constant.int 1
%int1_1291 = torch.constant.int 1
%int1_1292 = torch.constant.int 1
%int1_1293 = torch.constant.int 1
%int1_1294 = torch.constant.int 1
%int0_1295 = torch.constant.int 0
%4028 = torch.prim.ListConstruct %int1_1289, %int1_1290 : (!torch.int, !torch.int) -> !torch.list<int>
%4029 = torch.prim.ListConstruct %int1_1291, %int1_1292 : (!torch.int, !torch.int) -> !torch.list<int>
%4030 = torch.prim.ListConstruct %int1_1293, %int1_1294 : (!torch.int, !torch.int) -> !torch.list<int>
%4031 = torch.prim.ListConstruct %int0_1295, %int0_1295 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1296 = torch.constant.bool false
%int1_1297 = torch.constant.int 1
%4032 = torch.aten.convolution %4003, %4015, %4027, %4030, %4028, %4029, %false_1296, %4031, %int1_1297 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%4033 = torch.aten.relu %4032 : !torch.vtensor<[1,512,10,10],f32> -> !torch.vtensor<[1,512,10,10],f32>
%4034 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4035 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1298 = torch.constant.int 12
%4036 = torch.aten.item %4034 : !torch.vtensor<[],f32> -> !torch.float
%4037 = torch.aten.item %4035 : !torch.vtensor<[],si8> -> !torch.int
%4038 = torch.aten.quantize_per_tensor %4033, %4036, %4037, %int12_1298 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%4039 = torch.aten.int_repr %4038 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%4040 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4041 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4042 = torch.aten.item %4040 : !torch.vtensor<[],f32> -> !torch.float
%4043 = torch.aten.item %4041 : !torch.vtensor<[],si8> -> !torch.int
%4044 = torch.aten._make_per_tensor_quantized_tensor %4039, %4042, %4043 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%4045 = torch.aten.dequantize.self %4044 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%int1_1299 = torch.constant.int 1
%4046 = torch.aten.add.Tensor %4045, %3745, %int1_1299 : !torch.vtensor<[1,512,10,10],f32>, !torch.vtensor<[1,512,10,10],f32>, !torch.int -> !torch.vtensor<[1,512,10,10],f32>
%4047 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4048 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1300 = torch.constant.int 12
%4049 = torch.aten.item %4047 : !torch.vtensor<[],f32> -> !torch.float
%4050 = torch.aten.item %4048 : !torch.vtensor<[],si8> -> !torch.int
%4051 = torch.aten.quantize_per_tensor %4046, %4049, %4050, %int12_1300 : !torch.vtensor<[1,512,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%4052 = torch.aten.int_repr %4051 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],si8>
%4053 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4054 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4055 = torch.aten.item %4053 : !torch.vtensor<[],f32> -> !torch.float
%4056 = torch.aten.item %4054 : !torch.vtensor<[],si8> -> !torch.int
%4057 = torch.aten._make_per_tensor_quantized_tensor %4052, %4055, %4056 : !torch.vtensor<[1,512,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,10,10],!torch.qint8>
%4058 = torch.aten.dequantize.self %4057 : !torch.vtensor<[1,512,10,10],!torch.qint8> -> !torch.vtensor<[1,512,10,10],f32>
%4059 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%4060 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%4061 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1301 = torch.constant.int 0
%int0_1302 = torch.constant.int 0
%int0_1303 = torch.constant.int 0
%4062 = torch.aten.select.int %4061, %int0_1301, %int0_1303 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4063 = torch.aten.item %4062 : !torch.vtensor<[1],si64> -> !torch.int
%4064 = torch.aten.lt.int %4063, %int0_1301 : !torch.int, !torch.int -> !torch.bool
%4065 = torch.aten.Int.bool %4064 : !torch.bool -> !torch.int
%4066 = torch.aten.mul.int %4065, %int0_1302 : !torch.int, !torch.int -> !torch.int
%4067 = torch.aten.add.int %4063, %4066 : !torch.int, !torch.int -> !torch.int
%4068 = torch.prim.ListConstruct %4067 : (!torch.int) -> !torch.list<int>
%false_1304 = torch.constant.bool false
%none_1305 = torch.constant.none
%4069 = torch.aten.tensor %4068, %none_1305, %none_1305, %false_1304 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1306, %indices_1307 = torch.aten.sort %4069, %int0_1301, %false_1304 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1308 = torch.constant.int 0
%4070 = torch.aten.select.int %values_1306, %int0_1301, %int0_1308 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4071 = torch.aten.item %4070 : !torch.vtensor<[1],si64> -> !torch.int
%4072 = torch.aten.unsqueeze %4059, %4071 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4073 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1309 = torch.constant.int 0
%int0_1310 = torch.constant.int 0
%int0_1311 = torch.constant.int 0
%4074 = torch.aten.select.int %4073, %int0_1309, %int0_1311 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4075 = torch.aten.item %4074 : !torch.vtensor<[1],si64> -> !torch.int
%4076 = torch.aten.lt.int %4075, %int0_1309 : !torch.int, !torch.int -> !torch.bool
%4077 = torch.aten.Int.bool %4076 : !torch.bool -> !torch.int
%4078 = torch.aten.mul.int %4077, %int0_1310 : !torch.int, !torch.int -> !torch.int
%4079 = torch.aten.add.int %4075, %4078 : !torch.int, !torch.int -> !torch.int
%4080 = torch.prim.ListConstruct %4079 : (!torch.int) -> !torch.list<int>
%false_1312 = torch.constant.bool false
%none_1313 = torch.constant.none
%4081 = torch.aten.tensor %4080, %none_1313, %none_1313, %false_1312 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1314, %indices_1315 = torch.aten.sort %4081, %int0_1309, %false_1312 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1316 = torch.constant.int 0
%4082 = torch.aten.select.int %values_1314, %int0_1309, %int0_1316 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4083 = torch.aten.item %4082 : !torch.vtensor<[1],si64> -> !torch.int
%4084 = torch.aten.unsqueeze %4060, %4083 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4085 = torch.prim.ListConstruct %4072, %4084 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1317 = torch.constant.int 0
%4086 = torch.aten.cat %4085, %int0_1317 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4087 = torch.aten._shape_as_tensor %4058 : !torch.vtensor<[1,512,10,10],f32> -> !torch.vtensor<[4],si64>
%4088 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4089 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4090 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1318 = torch.constant.none
%int1_1319 = torch.constant.int 1
%4091 = torch.prim.ListConstruct %int1_1319 : (!torch.int) -> !torch.list<int>
%4092 = torch.aten.ones %4091, %none_1318, %none_1318, %none_1318, %none_1318 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1320 = torch.constant.int 0
%int0_1321 = torch.constant.int 0
%4093 = torch.prim.NumToTensor.Scalar %int0_1321 : !torch.int -> !torch.vtensor<[1],si64>
%4094 = torch.aten.index_select %4089, %int0_1320, %4093 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4095 = torch.aten.item %4094 : !torch.vtensor<[1],si64> -> !torch.int
%4096 = torch.aten.index_select %4090, %int0_1320, %4093 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4097 = torch.aten.item %4096 : !torch.vtensor<[1],si64> -> !torch.int
%4098 = torch.aten.index_select %4088, %int0_1320, %4093 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4099 = torch.aten.item %4098 : !torch.vtensor<[1],si64> -> !torch.int
%4100 = torch.aten.index_select %4092, %int0_1320, %4093 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4101 = torch.aten.item %4100 : !torch.vtensor<[1],si64> -> !torch.int
%4102 = torch.aten.slice.Tensor %4087, %4099, %4095, %4097, %4101 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1322 = torch.constant.int 4
%none_1323 = torch.constant.none
%false_1324 = torch.constant.bool false
%4103 = torch.aten.to.dtype %4086, %int4_1322, %false_1324, %false_1324, %none_1323 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4104 = torch.prim.ListConstruct %4102, %4103 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1325 = torch.constant.int 0
%4105 = torch.aten.cat %4104, %int0_1325 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4106 = torch.operator "onnx.Resize"(%4058, %none, %none, %4105) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,512,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4107 = torch.prim.ListConstruct %4106, %3686 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,512,20,20],f32>) -> !torch.list<vtensor>
%int1_1326 = torch.constant.int 1
%4108 = torch.aten.cat %4107, %int1_1326 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%4109 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1327 = torch.constant.int 12
%4111 = torch.aten.item %4109 : !torch.vtensor<[],f32> -> !torch.float
%4112 = torch.aten.item %4110 : !torch.vtensor<[],si8> -> !torch.int
%4113 = torch.aten.quantize_per_tensor %4108, %4111, %4112, %int12_1327 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%4114 = torch.aten.int_repr %4113 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%4115 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4117 = torch.aten.item %4115 : !torch.vtensor<[],f32> -> !torch.float
%4118 = torch.aten.item %4116 : !torch.vtensor<[],si8> -> !torch.int
%4119 = torch.aten._make_per_tensor_quantized_tensor %4114, %4117, %4118 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%4120 = torch.aten.dequantize.self %4119 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%4121 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1328 = torch.constant.int 12
%4123 = torch.aten.item %4121 : !torch.vtensor<[],f32> -> !torch.float
%4124 = torch.aten.item %4122 : !torch.vtensor<[],si8> -> !torch.int
%4125 = torch.aten.quantize_per_tensor %120, %4123, %4124, %int12_1328 : !torch.vtensor<[512,1024,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,1024,3,3],!torch.qint8>
%4126 = torch.aten.int_repr %4125 : !torch.vtensor<[512,1024,3,3],!torch.qint8> -> !torch.vtensor<[512,1024,3,3],si8>
%4127 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4129 = torch.aten.item %4127 : !torch.vtensor<[],f32> -> !torch.float
%4130 = torch.aten.item %4128 : !torch.vtensor<[],si8> -> !torch.int
%4131 = torch.aten._make_per_tensor_quantized_tensor %4126, %4129, %4130 : !torch.vtensor<[512,1024,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,1024,3,3],!torch.qint8>
%4132 = torch.aten.dequantize.self %4131 : !torch.vtensor<[512,1024,3,3],!torch.qint8> -> !torch.vtensor<[512,1024,3,3],f32>
%4133 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4134 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1329 = torch.constant.int 12
%4135 = torch.aten.item %4133 : !torch.vtensor<[],f32> -> !torch.float
%4136 = torch.aten.item %4134 : !torch.vtensor<[],si8> -> !torch.int
%4137 = torch.aten.quantize_per_tensor %121, %4135, %4136, %int12_1329 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4138 = torch.aten.int_repr %4137 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%4139 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4140 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4141 = torch.aten.item %4139 : !torch.vtensor<[],f32> -> !torch.float
%4142 = torch.aten.item %4140 : !torch.vtensor<[],si8> -> !torch.int
%4143 = torch.aten._make_per_tensor_quantized_tensor %4138, %4141, %4142 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4144 = torch.aten.dequantize.self %4143 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1330 = torch.constant.int 1
%int1_1331 = torch.constant.int 1
%int1_1332 = torch.constant.int 1
%int1_1333 = torch.constant.int 1
%int1_1334 = torch.constant.int 1
%int1_1335 = torch.constant.int 1
%int0_1336 = torch.constant.int 0
%4145 = torch.prim.ListConstruct %int1_1330, %int1_1331 : (!torch.int, !torch.int) -> !torch.list<int>
%4146 = torch.prim.ListConstruct %int1_1332, %int1_1333 : (!torch.int, !torch.int) -> !torch.list<int>
%4147 = torch.prim.ListConstruct %int1_1334, %int1_1335 : (!torch.int, !torch.int) -> !torch.list<int>
%4148 = torch.prim.ListConstruct %int0_1336, %int0_1336 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1337 = torch.constant.bool false
%int1_1338 = torch.constant.int 1
%4149 = torch.aten.convolution %4120, %4132, %4144, %4147, %4145, %4146, %false_1337, %4148, %int1_1338 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[512,1024,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4150 = torch.aten.relu %4149 : !torch.vtensor<[1,512,20,20],f32> -> !torch.vtensor<[1,512,20,20],f32>
%4151 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1339 = torch.constant.int 12
%4153 = torch.aten.item %4151 : !torch.vtensor<[],f32> -> !torch.float
%4154 = torch.aten.item %4152 : !torch.vtensor<[],si8> -> !torch.int
%4155 = torch.aten.quantize_per_tensor %4150, %4153, %4154, %int12_1339 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4156 = torch.aten.int_repr %4155 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4157 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4159 = torch.aten.item %4157 : !torch.vtensor<[],f32> -> !torch.float
%4160 = torch.aten.item %4158 : !torch.vtensor<[],si8> -> !torch.int
%4161 = torch.aten._make_per_tensor_quantized_tensor %4156, %4159, %4160 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4162 = torch.aten.dequantize.self %4161 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%4163 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1340 = torch.constant.int 12
%4165 = torch.aten.item %4163 : !torch.vtensor<[],f32> -> !torch.float
%4166 = torch.aten.item %4164 : !torch.vtensor<[],si8> -> !torch.int
%4167 = torch.aten.quantize_per_tensor %122, %4165, %4166, %int12_1340 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4168 = torch.aten.int_repr %4167 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%4169 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4171 = torch.aten.item %4169 : !torch.vtensor<[],f32> -> !torch.float
%4172 = torch.aten.item %4170 : !torch.vtensor<[],si8> -> !torch.int
%4173 = torch.aten._make_per_tensor_quantized_tensor %4168, %4171, %4172 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4174 = torch.aten.dequantize.self %4173 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%4175 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1341 = torch.constant.int 12
%4177 = torch.aten.item %4175 : !torch.vtensor<[],f32> -> !torch.float
%4178 = torch.aten.item %4176 : !torch.vtensor<[],si8> -> !torch.int
%4179 = torch.aten.quantize_per_tensor %123, %4177, %4178, %int12_1341 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4180 = torch.aten.int_repr %4179 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4181 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4183 = torch.aten.item %4181 : !torch.vtensor<[],f32> -> !torch.float
%4184 = torch.aten.item %4182 : !torch.vtensor<[],si8> -> !torch.int
%4185 = torch.aten._make_per_tensor_quantized_tensor %4180, %4183, %4184 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4186 = torch.aten.dequantize.self %4185 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_1342 = torch.constant.int 1
%int1_1343 = torch.constant.int 1
%int1_1344 = torch.constant.int 1
%int1_1345 = torch.constant.int 1
%int1_1346 = torch.constant.int 1
%int1_1347 = torch.constant.int 1
%int0_1348 = torch.constant.int 0
%4187 = torch.prim.ListConstruct %int1_1342, %int1_1343 : (!torch.int, !torch.int) -> !torch.list<int>
%4188 = torch.prim.ListConstruct %int1_1344, %int1_1345 : (!torch.int, !torch.int) -> !torch.list<int>
%4189 = torch.prim.ListConstruct %int1_1346, %int1_1347 : (!torch.int, !torch.int) -> !torch.list<int>
%4190 = torch.prim.ListConstruct %int0_1348, %int0_1348 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1349 = torch.constant.bool false
%int1_1350 = torch.constant.int 1
%4191 = torch.aten.convolution %4162, %4174, %4186, %4189, %4187, %4188, %false_1349, %4190, %int1_1350 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4192 = torch.aten.relu %4191 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4193 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1351 = torch.constant.int 12
%4195 = torch.aten.item %4193 : !torch.vtensor<[],f32> -> !torch.float
%4196 = torch.aten.item %4194 : !torch.vtensor<[],si8> -> !torch.int
%4197 = torch.aten.quantize_per_tensor %4192, %4195, %4196, %int12_1351 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4198 = torch.aten.int_repr %4197 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%4199 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4201 = torch.aten.item %4199 : !torch.vtensor<[],f32> -> !torch.float
%4202 = torch.aten.item %4200 : !torch.vtensor<[],si8> -> !torch.int
%4203 = torch.aten._make_per_tensor_quantized_tensor %4198, %4201, %4202 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4204 = torch.aten.dequantize.self %4203 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%4205 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1352 = torch.constant.int 12
%4207 = torch.aten.item %4205 : !torch.vtensor<[],f32> -> !torch.float
%4208 = torch.aten.item %4206 : !torch.vtensor<[],si8> -> !torch.int
%4209 = torch.aten.quantize_per_tensor %124, %4207, %4208, %int12_1352 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4210 = torch.aten.int_repr %4209 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%4211 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4213 = torch.aten.item %4211 : !torch.vtensor<[],f32> -> !torch.float
%4214 = torch.aten.item %4212 : !torch.vtensor<[],si8> -> !torch.int
%4215 = torch.aten._make_per_tensor_quantized_tensor %4210, %4213, %4214 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4216 = torch.aten.dequantize.self %4215 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%4217 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4218 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1353 = torch.constant.int 12
%4219 = torch.aten.item %4217 : !torch.vtensor<[],f32> -> !torch.float
%4220 = torch.aten.item %4218 : !torch.vtensor<[],si8> -> !torch.int
%4221 = torch.aten.quantize_per_tensor %125, %4219, %4220, %int12_1353 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4222 = torch.aten.int_repr %4221 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4223 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4224 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4225 = torch.aten.item %4223 : !torch.vtensor<[],f32> -> !torch.float
%4226 = torch.aten.item %4224 : !torch.vtensor<[],si8> -> !torch.int
%4227 = torch.aten._make_per_tensor_quantized_tensor %4222, %4225, %4226 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4228 = torch.aten.dequantize.self %4227 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1354 = torch.constant.int 2
%int2_1355 = torch.constant.int 2
%int2_1356 = torch.constant.int 2
%int2_1357 = torch.constant.int 2
%int1_1358 = torch.constant.int 1
%int1_1359 = torch.constant.int 1
%int0_1360 = torch.constant.int 0
%4229 = torch.prim.ListConstruct %int2_1354, %int2_1355 : (!torch.int, !torch.int) -> !torch.list<int>
%4230 = torch.prim.ListConstruct %int2_1356, %int2_1357 : (!torch.int, !torch.int) -> !torch.list<int>
%4231 = torch.prim.ListConstruct %int1_1358, %int1_1359 : (!torch.int, !torch.int) -> !torch.list<int>
%4232 = torch.prim.ListConstruct %int0_1360, %int0_1360 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1361 = torch.constant.bool false
%int1_1362 = torch.constant.int 1
%4233 = torch.aten.convolution %4204, %4216, %4228, %4231, %4229, %4230, %false_1361, %4232, %int1_1362 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4234 = torch.aten.relu %4233 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4235 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4236 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1363 = torch.constant.int 12
%4237 = torch.aten.item %4235 : !torch.vtensor<[],f32> -> !torch.float
%4238 = torch.aten.item %4236 : !torch.vtensor<[],si8> -> !torch.int
%4239 = torch.aten.quantize_per_tensor %4234, %4237, %4238, %int12_1363 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4240 = torch.aten.int_repr %4239 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%4241 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4243 = torch.aten.item %4241 : !torch.vtensor<[],f32> -> !torch.float
%4244 = torch.aten.item %4242 : !torch.vtensor<[],si8> -> !torch.int
%4245 = torch.aten._make_per_tensor_quantized_tensor %4240, %4243, %4244 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4246 = torch.aten.dequantize.self %4245 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%4247 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4248 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1364 = torch.constant.int 12
%4249 = torch.aten.item %4247 : !torch.vtensor<[],f32> -> !torch.float
%4250 = torch.aten.item %4248 : !torch.vtensor<[],si8> -> !torch.int
%4251 = torch.aten.quantize_per_tensor %126, %4249, %4250, %int12_1364 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4252 = torch.aten.int_repr %4251 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%4253 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4254 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4255 = torch.aten.item %4253 : !torch.vtensor<[],f32> -> !torch.float
%4256 = torch.aten.item %4254 : !torch.vtensor<[],si8> -> !torch.int
%4257 = torch.aten._make_per_tensor_quantized_tensor %4252, %4255, %4256 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4258 = torch.aten.dequantize.self %4257 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%4259 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4260 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1365 = torch.constant.int 12
%4261 = torch.aten.item %4259 : !torch.vtensor<[],f32> -> !torch.float
%4262 = torch.aten.item %4260 : !torch.vtensor<[],si8> -> !torch.int
%4263 = torch.aten.quantize_per_tensor %127, %4261, %4262, %int12_1365 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4264 = torch.aten.int_repr %4263 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4265 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4266 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4267 = torch.aten.item %4265 : !torch.vtensor<[],f32> -> !torch.float
%4268 = torch.aten.item %4266 : !torch.vtensor<[],si8> -> !torch.int
%4269 = torch.aten._make_per_tensor_quantized_tensor %4264, %4267, %4268 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4270 = torch.aten.dequantize.self %4269 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1366 = torch.constant.int 4
%int4_1367 = torch.constant.int 4
%int4_1368 = torch.constant.int 4
%int4_1369 = torch.constant.int 4
%int1_1370 = torch.constant.int 1
%int1_1371 = torch.constant.int 1
%int0_1372 = torch.constant.int 0
%4271 = torch.prim.ListConstruct %int4_1366, %int4_1367 : (!torch.int, !torch.int) -> !torch.list<int>
%4272 = torch.prim.ListConstruct %int4_1368, %int4_1369 : (!torch.int, !torch.int) -> !torch.list<int>
%4273 = torch.prim.ListConstruct %int1_1370, %int1_1371 : (!torch.int, !torch.int) -> !torch.list<int>
%4274 = torch.prim.ListConstruct %int0_1372, %int0_1372 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1373 = torch.constant.bool false
%int1_1374 = torch.constant.int 1
%4275 = torch.aten.convolution %4246, %4258, %4270, %4273, %4271, %4272, %false_1373, %4274, %int1_1374 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4276 = torch.aten.relu %4275 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4277 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1375 = torch.constant.int 12
%4279 = torch.aten.item %4277 : !torch.vtensor<[],f32> -> !torch.float
%4280 = torch.aten.item %4278 : !torch.vtensor<[],si8> -> !torch.int
%4281 = torch.aten.quantize_per_tensor %4276, %4279, %4280, %int12_1375 : !torch.vtensor<[1,256,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4282 = torch.aten.int_repr %4281 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],si8>
%4283 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4285 = torch.aten.item %4283 : !torch.vtensor<[],f32> -> !torch.float
%4286 = torch.aten.item %4284 : !torch.vtensor<[],si8> -> !torch.int
%4287 = torch.aten._make_per_tensor_quantized_tensor %4282, %4285, %4286 : !torch.vtensor<[1,256,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,20,20],!torch.qint8>
%4288 = torch.aten.dequantize.self %4287 : !torch.vtensor<[1,256,20,20],!torch.qint8> -> !torch.vtensor<[1,256,20,20],f32>
%4289 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4290 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1376 = torch.constant.int 12
%4291 = torch.aten.item %4289 : !torch.vtensor<[],f32> -> !torch.float
%4292 = torch.aten.item %4290 : !torch.vtensor<[],si8> -> !torch.int
%4293 = torch.aten.quantize_per_tensor %128, %4291, %4292, %int12_1376 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4294 = torch.aten.int_repr %4293 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%4295 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4296 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4297 = torch.aten.item %4295 : !torch.vtensor<[],f32> -> !torch.float
%4298 = torch.aten.item %4296 : !torch.vtensor<[],si8> -> !torch.int
%4299 = torch.aten._make_per_tensor_quantized_tensor %4294, %4297, %4298 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4300 = torch.aten.dequantize.self %4299 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%4301 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4302 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1377 = torch.constant.int 12
%4303 = torch.aten.item %4301 : !torch.vtensor<[],f32> -> !torch.float
%4304 = torch.aten.item %4302 : !torch.vtensor<[],si8> -> !torch.int
%4305 = torch.aten.quantize_per_tensor %129, %4303, %4304, %int12_1377 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4306 = torch.aten.int_repr %4305 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4307 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4308 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4309 = torch.aten.item %4307 : !torch.vtensor<[],f32> -> !torch.float
%4310 = torch.aten.item %4308 : !torch.vtensor<[],si8> -> !torch.int
%4311 = torch.aten._make_per_tensor_quantized_tensor %4306, %4309, %4310 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4312 = torch.aten.dequantize.self %4311 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int8_1378 = torch.constant.int 8
%int8_1379 = torch.constant.int 8
%int8_1380 = torch.constant.int 8
%int8_1381 = torch.constant.int 8
%int1_1382 = torch.constant.int 1
%int1_1383 = torch.constant.int 1
%int0_1384 = torch.constant.int 0
%4313 = torch.prim.ListConstruct %int8_1378, %int8_1379 : (!torch.int, !torch.int) -> !torch.list<int>
%4314 = torch.prim.ListConstruct %int8_1380, %int8_1381 : (!torch.int, !torch.int) -> !torch.list<int>
%4315 = torch.prim.ListConstruct %int1_1382, %int1_1383 : (!torch.int, !torch.int) -> !torch.list<int>
%4316 = torch.prim.ListConstruct %int0_1384, %int0_1384 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1385 = torch.constant.bool false
%int1_1386 = torch.constant.int 1
%4317 = torch.aten.convolution %4288, %4300, %4312, %4315, %4313, %4314, %false_1385, %4316, %int1_1386 : !torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4318 = torch.aten.relu %4317 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4319 = torch.prim.ListConstruct %4318, %4288 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1387 = torch.constant.int 1
%4320 = torch.aten.cat %4319, %int1_1387 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4321 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4322 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1388 = torch.constant.int 12
%4323 = torch.aten.item %4321 : !torch.vtensor<[],f32> -> !torch.float
%4324 = torch.aten.item %4322 : !torch.vtensor<[],si8> -> !torch.int
%4325 = torch.aten.quantize_per_tensor %4320, %4323, %4324, %int12_1388 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4326 = torch.aten.int_repr %4325 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4327 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4329 = torch.aten.item %4327 : !torch.vtensor<[],f32> -> !torch.float
%4330 = torch.aten.item %4328 : !torch.vtensor<[],si8> -> !torch.int
%4331 = torch.aten._make_per_tensor_quantized_tensor %4326, %4329, %4330 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4332 = torch.aten.dequantize.self %4331 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%4333 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4334 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1389 = torch.constant.int 12
%4335 = torch.aten.item %4333 : !torch.vtensor<[],f32> -> !torch.float
%4336 = torch.aten.item %4334 : !torch.vtensor<[],si8> -> !torch.int
%4337 = torch.aten.quantize_per_tensor %130, %4335, %4336, %int12_1389 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4338 = torch.aten.int_repr %4337 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%4339 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4341 = torch.aten.item %4339 : !torch.vtensor<[],f32> -> !torch.float
%4342 = torch.aten.item %4340 : !torch.vtensor<[],si8> -> !torch.int
%4343 = torch.aten._make_per_tensor_quantized_tensor %4338, %4341, %4342 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4344 = torch.aten.dequantize.self %4343 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%4345 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1390 = torch.constant.int 12
%4347 = torch.aten.item %4345 : !torch.vtensor<[],f32> -> !torch.float
%4348 = torch.aten.item %4346 : !torch.vtensor<[],si8> -> !torch.int
%4349 = torch.aten.quantize_per_tensor %131, %4347, %4348, %int12_1390 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4350 = torch.aten.int_repr %4349 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4351 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4352 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4353 = torch.aten.item %4351 : !torch.vtensor<[],f32> -> !torch.float
%4354 = torch.aten.item %4352 : !torch.vtensor<[],si8> -> !torch.int
%4355 = torch.aten._make_per_tensor_quantized_tensor %4350, %4353, %4354 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4356 = torch.aten.dequantize.self %4355 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int4_1391 = torch.constant.int 4
%int4_1392 = torch.constant.int 4
%int4_1393 = torch.constant.int 4
%int4_1394 = torch.constant.int 4
%int1_1395 = torch.constant.int 1
%int1_1396 = torch.constant.int 1
%int0_1397 = torch.constant.int 0
%4357 = torch.prim.ListConstruct %int4_1391, %int4_1392 : (!torch.int, !torch.int) -> !torch.list<int>
%4358 = torch.prim.ListConstruct %int4_1393, %int4_1394 : (!torch.int, !torch.int) -> !torch.list<int>
%4359 = torch.prim.ListConstruct %int1_1395, %int1_1396 : (!torch.int, !torch.int) -> !torch.list<int>
%4360 = torch.prim.ListConstruct %int0_1397, %int0_1397 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1398 = torch.constant.bool false
%int1_1399 = torch.constant.int 1
%4361 = torch.aten.convolution %4332, %4344, %4356, %4359, %4357, %4358, %false_1398, %4360, %int1_1399 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4362 = torch.aten.relu %4361 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4363 = torch.prim.ListConstruct %4362, %4246 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1400 = torch.constant.int 1
%4364 = torch.aten.cat %4363, %int1_1400 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4365 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1401 = torch.constant.int 12
%4367 = torch.aten.item %4365 : !torch.vtensor<[],f32> -> !torch.float
%4368 = torch.aten.item %4366 : !torch.vtensor<[],si8> -> !torch.int
%4369 = torch.aten.quantize_per_tensor %4364, %4367, %4368, %int12_1401 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4370 = torch.aten.int_repr %4369 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4371 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4372 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4373 = torch.aten.item %4371 : !torch.vtensor<[],f32> -> !torch.float
%4374 = torch.aten.item %4372 : !torch.vtensor<[],si8> -> !torch.int
%4375 = torch.aten._make_per_tensor_quantized_tensor %4370, %4373, %4374 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4376 = torch.aten.dequantize.self %4375 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%4377 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4378 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1402 = torch.constant.int 12
%4379 = torch.aten.item %4377 : !torch.vtensor<[],f32> -> !torch.float
%4380 = torch.aten.item %4378 : !torch.vtensor<[],si8> -> !torch.int
%4381 = torch.aten.quantize_per_tensor %132, %4379, %4380, %int12_1402 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4382 = torch.aten.int_repr %4381 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%4383 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4384 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4385 = torch.aten.item %4383 : !torch.vtensor<[],f32> -> !torch.float
%4386 = torch.aten.item %4384 : !torch.vtensor<[],si8> -> !torch.int
%4387 = torch.aten._make_per_tensor_quantized_tensor %4382, %4385, %4386 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%4388 = torch.aten.dequantize.self %4387 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%4389 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4390 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1403 = torch.constant.int 12
%4391 = torch.aten.item %4389 : !torch.vtensor<[],f32> -> !torch.float
%4392 = torch.aten.item %4390 : !torch.vtensor<[],si8> -> !torch.int
%4393 = torch.aten.quantize_per_tensor %133, %4391, %4392, %int12_1403 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4394 = torch.aten.int_repr %4393 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4395 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4396 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4397 = torch.aten.item %4395 : !torch.vtensor<[],f32> -> !torch.float
%4398 = torch.aten.item %4396 : !torch.vtensor<[],si8> -> !torch.int
%4399 = torch.aten._make_per_tensor_quantized_tensor %4394, %4397, %4398 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4400 = torch.aten.dequantize.self %4399 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int2_1404 = torch.constant.int 2
%int2_1405 = torch.constant.int 2
%int2_1406 = torch.constant.int 2
%int2_1407 = torch.constant.int 2
%int1_1408 = torch.constant.int 1
%int1_1409 = torch.constant.int 1
%int0_1410 = torch.constant.int 0
%4401 = torch.prim.ListConstruct %int2_1404, %int2_1405 : (!torch.int, !torch.int) -> !torch.list<int>
%4402 = torch.prim.ListConstruct %int2_1406, %int2_1407 : (!torch.int, !torch.int) -> !torch.list<int>
%4403 = torch.prim.ListConstruct %int1_1408, %int1_1409 : (!torch.int, !torch.int) -> !torch.list<int>
%4404 = torch.prim.ListConstruct %int0_1410, %int0_1410 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1411 = torch.constant.bool false
%int1_1412 = torch.constant.int 1
%4405 = torch.aten.convolution %4376, %4388, %4400, %4403, %4401, %4402, %false_1411, %4404, %int1_1412 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,20,20],f32>
%4406 = torch.aten.relu %4405 : !torch.vtensor<[1,256,20,20],f32> -> !torch.vtensor<[1,256,20,20],f32>
%4407 = torch.prim.ListConstruct %4406, %4204 : (!torch.vtensor<[1,256,20,20],f32>, !torch.vtensor<[1,256,20,20],f32>) -> !torch.list<vtensor>
%int1_1413 = torch.constant.int 1
%4408 = torch.aten.cat %4407, %int1_1413 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4409 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4410 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1414 = torch.constant.int 12
%4411 = torch.aten.item %4409 : !torch.vtensor<[],f32> -> !torch.float
%4412 = torch.aten.item %4410 : !torch.vtensor<[],si8> -> !torch.int
%4413 = torch.aten.quantize_per_tensor %4408, %4411, %4412, %int12_1414 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4414 = torch.aten.int_repr %4413 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4415 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4416 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4417 = torch.aten.item %4415 : !torch.vtensor<[],f32> -> !torch.float
%4418 = torch.aten.item %4416 : !torch.vtensor<[],si8> -> !torch.int
%4419 = torch.aten._make_per_tensor_quantized_tensor %4414, %4417, %4418 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4420 = torch.aten.dequantize.self %4419 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%4421 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4422 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1415 = torch.constant.int 12
%4423 = torch.aten.item %4421 : !torch.vtensor<[],f32> -> !torch.float
%4424 = torch.aten.item %4422 : !torch.vtensor<[],si8> -> !torch.int
%4425 = torch.aten.quantize_per_tensor %134, %4423, %4424, %int12_1415 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%4426 = torch.aten.int_repr %4425 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%4427 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4428 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4429 = torch.aten.item %4427 : !torch.vtensor<[],f32> -> !torch.float
%4430 = torch.aten.item %4428 : !torch.vtensor<[],si8> -> !torch.int
%4431 = torch.aten._make_per_tensor_quantized_tensor %4426, %4429, %4430 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%4432 = torch.aten.dequantize.self %4431 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%4433 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4434 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1416 = torch.constant.int 12
%4435 = torch.aten.item %4433 : !torch.vtensor<[],f32> -> !torch.float
%4436 = torch.aten.item %4434 : !torch.vtensor<[],si8> -> !torch.int
%4437 = torch.aten.quantize_per_tensor %135, %4435, %4436, %int12_1416 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4438 = torch.aten.int_repr %4437 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%4439 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4440 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4441 = torch.aten.item %4439 : !torch.vtensor<[],f32> -> !torch.float
%4442 = torch.aten.item %4440 : !torch.vtensor<[],si8> -> !torch.int
%4443 = torch.aten._make_per_tensor_quantized_tensor %4438, %4441, %4442 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%4444 = torch.aten.dequantize.self %4443 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_1417 = torch.constant.int 1
%int1_1418 = torch.constant.int 1
%int1_1419 = torch.constant.int 1
%int1_1420 = torch.constant.int 1
%int1_1421 = torch.constant.int 1
%int1_1422 = torch.constant.int 1
%int0_1423 = torch.constant.int 0
%4445 = torch.prim.ListConstruct %int1_1417, %int1_1418 : (!torch.int, !torch.int) -> !torch.list<int>
%4446 = torch.prim.ListConstruct %int1_1419, %int1_1420 : (!torch.int, !torch.int) -> !torch.list<int>
%4447 = torch.prim.ListConstruct %int1_1421, %int1_1422 : (!torch.int, !torch.int) -> !torch.list<int>
%4448 = torch.prim.ListConstruct %int0_1423, %int0_1423 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1424 = torch.constant.bool false
%int1_1425 = torch.constant.int 1
%4449 = torch.aten.convolution %4420, %4432, %4444, %4447, %4445, %4446, %false_1424, %4448, %int1_1425 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4450 = torch.aten.relu %4449 : !torch.vtensor<[1,512,20,20],f32> -> !torch.vtensor<[1,512,20,20],f32>
%4451 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4452 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1426 = torch.constant.int 12
%4453 = torch.aten.item %4451 : !torch.vtensor<[],f32> -> !torch.float
%4454 = torch.aten.item %4452 : !torch.vtensor<[],si8> -> !torch.int
%4455 = torch.aten.quantize_per_tensor %4450, %4453, %4454, %int12_1426 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4456 = torch.aten.int_repr %4455 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4457 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4458 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4459 = torch.aten.item %4457 : !torch.vtensor<[],f32> -> !torch.float
%4460 = torch.aten.item %4458 : !torch.vtensor<[],si8> -> !torch.int
%4461 = torch.aten._make_per_tensor_quantized_tensor %4456, %4459, %4460 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4462 = torch.aten.dequantize.self %4461 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%int1_1427 = torch.constant.int 1
%4463 = torch.aten.add.Tensor %4462, %4162, %int1_1427 : !torch.vtensor<[1,512,20,20],f32>, !torch.vtensor<[1,512,20,20],f32>, !torch.int -> !torch.vtensor<[1,512,20,20],f32>
%4464 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1428 = torch.constant.int 12
%4466 = torch.aten.item %4464 : !torch.vtensor<[],f32> -> !torch.float
%4467 = torch.aten.item %4465 : !torch.vtensor<[],si8> -> !torch.int
%4468 = torch.aten.quantize_per_tensor %4463, %4466, %4467, %int12_1428 : !torch.vtensor<[1,512,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4469 = torch.aten.int_repr %4468 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],si8>
%4470 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4472 = torch.aten.item %4470 : !torch.vtensor<[],f32> -> !torch.float
%4473 = torch.aten.item %4471 : !torch.vtensor<[],si8> -> !torch.int
%4474 = torch.aten._make_per_tensor_quantized_tensor %4469, %4472, %4473 : !torch.vtensor<[1,512,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,20,20],!torch.qint8>
%4475 = torch.aten.dequantize.self %4474 : !torch.vtensor<[1,512,20,20],!torch.qint8> -> !torch.vtensor<[1,512,20,20],f32>
%4476 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%4477 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%4478 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1429 = torch.constant.int 0
%int0_1430 = torch.constant.int 0
%int0_1431 = torch.constant.int 0
%4479 = torch.aten.select.int %4478, %int0_1429, %int0_1431 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4480 = torch.aten.item %4479 : !torch.vtensor<[1],si64> -> !torch.int
%4481 = torch.aten.lt.int %4480, %int0_1429 : !torch.int, !torch.int -> !torch.bool
%4482 = torch.aten.Int.bool %4481 : !torch.bool -> !torch.int
%4483 = torch.aten.mul.int %4482, %int0_1430 : !torch.int, !torch.int -> !torch.int
%4484 = torch.aten.add.int %4480, %4483 : !torch.int, !torch.int -> !torch.int
%4485 = torch.prim.ListConstruct %4484 : (!torch.int) -> !torch.list<int>
%false_1432 = torch.constant.bool false
%none_1433 = torch.constant.none
%4486 = torch.aten.tensor %4485, %none_1433, %none_1433, %false_1432 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1434, %indices_1435 = torch.aten.sort %4486, %int0_1429, %false_1432 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1436 = torch.constant.int 0
%4487 = torch.aten.select.int %values_1434, %int0_1429, %int0_1436 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4488 = torch.aten.item %4487 : !torch.vtensor<[1],si64> -> !torch.int
%4489 = torch.aten.unsqueeze %4476, %4488 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4490 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1437 = torch.constant.int 0
%int0_1438 = torch.constant.int 0
%int0_1439 = torch.constant.int 0
%4491 = torch.aten.select.int %4490, %int0_1437, %int0_1439 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4492 = torch.aten.item %4491 : !torch.vtensor<[1],si64> -> !torch.int
%4493 = torch.aten.lt.int %4492, %int0_1437 : !torch.int, !torch.int -> !torch.bool
%4494 = torch.aten.Int.bool %4493 : !torch.bool -> !torch.int
%4495 = torch.aten.mul.int %4494, %int0_1438 : !torch.int, !torch.int -> !torch.int
%4496 = torch.aten.add.int %4492, %4495 : !torch.int, !torch.int -> !torch.int
%4497 = torch.prim.ListConstruct %4496 : (!torch.int) -> !torch.list<int>
%false_1440 = torch.constant.bool false
%none_1441 = torch.constant.none
%4498 = torch.aten.tensor %4497, %none_1441, %none_1441, %false_1440 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1442, %indices_1443 = torch.aten.sort %4498, %int0_1437, %false_1440 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1444 = torch.constant.int 0
%4499 = torch.aten.select.int %values_1442, %int0_1437, %int0_1444 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4500 = torch.aten.item %4499 : !torch.vtensor<[1],si64> -> !torch.int
%4501 = torch.aten.unsqueeze %4477, %4500 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4502 = torch.prim.ListConstruct %4489, %4501 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1445 = torch.constant.int 0
%4503 = torch.aten.cat %4502, %int0_1445 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4504 = torch.aten._shape_as_tensor %4475 : !torch.vtensor<[1,512,20,20],f32> -> !torch.vtensor<[4],si64>
%4505 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4506 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4507 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1446 = torch.constant.none
%int1_1447 = torch.constant.int 1
%4508 = torch.prim.ListConstruct %int1_1447 : (!torch.int) -> !torch.list<int>
%4509 = torch.aten.ones %4508, %none_1446, %none_1446, %none_1446, %none_1446 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1448 = torch.constant.int 0
%int0_1449 = torch.constant.int 0
%4510 = torch.prim.NumToTensor.Scalar %int0_1449 : !torch.int -> !torch.vtensor<[1],si64>
%4511 = torch.aten.index_select %4506, %int0_1448, %4510 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4512 = torch.aten.item %4511 : !torch.vtensor<[1],si64> -> !torch.int
%4513 = torch.aten.index_select %4507, %int0_1448, %4510 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4514 = torch.aten.item %4513 : !torch.vtensor<[1],si64> -> !torch.int
%4515 = torch.aten.index_select %4505, %int0_1448, %4510 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4516 = torch.aten.item %4515 : !torch.vtensor<[1],si64> -> !torch.int
%4517 = torch.aten.index_select %4509, %int0_1448, %4510 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4518 = torch.aten.item %4517 : !torch.vtensor<[1],si64> -> !torch.int
%4519 = torch.aten.slice.Tensor %4504, %4516, %4512, %4514, %4518 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1450 = torch.constant.int 4
%none_1451 = torch.constant.none
%false_1452 = torch.constant.bool false
%4520 = torch.aten.to.dtype %4503, %int4_1450, %false_1452, %false_1452, %none_1451 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4521 = torch.prim.ListConstruct %4519, %4520 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1453 = torch.constant.int 0
%4522 = torch.aten.cat %4521, %int0_1453 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4523 = torch.operator "onnx.Resize"(%4475, %none, %none, %4522) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,512,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4524 = torch.prim.ListConstruct %4523, %3314 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,512,40,40],f32>) -> !torch.list<vtensor>
%int1_1454 = torch.constant.int 1
%4525 = torch.aten.cat %4524, %int1_1454 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%4526 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4527 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1455 = torch.constant.int 12
%4528 = torch.aten.item %4526 : !torch.vtensor<[],f32> -> !torch.float
%4529 = torch.aten.item %4527 : !torch.vtensor<[],si8> -> !torch.int
%4530 = torch.aten.quantize_per_tensor %4525, %4528, %4529, %int12_1455 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%4531 = torch.aten.int_repr %4530 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%4532 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4533 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4534 = torch.aten.item %4532 : !torch.vtensor<[],f32> -> !torch.float
%4535 = torch.aten.item %4533 : !torch.vtensor<[],si8> -> !torch.int
%4536 = torch.aten._make_per_tensor_quantized_tensor %4531, %4534, %4535 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%4537 = torch.aten.dequantize.self %4536 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%4538 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4539 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1456 = torch.constant.int 12
%4540 = torch.aten.item %4538 : !torch.vtensor<[],f32> -> !torch.float
%4541 = torch.aten.item %4539 : !torch.vtensor<[],si8> -> !torch.int
%4542 = torch.aten.quantize_per_tensor %136, %4540, %4541, %int12_1456 : !torch.vtensor<[256,1024,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,3,3],!torch.qint8>
%4543 = torch.aten.int_repr %4542 : !torch.vtensor<[256,1024,3,3],!torch.qint8> -> !torch.vtensor<[256,1024,3,3],si8>
%4544 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4545 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4546 = torch.aten.item %4544 : !torch.vtensor<[],f32> -> !torch.float
%4547 = torch.aten.item %4545 : !torch.vtensor<[],si8> -> !torch.int
%4548 = torch.aten._make_per_tensor_quantized_tensor %4543, %4546, %4547 : !torch.vtensor<[256,1024,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,3,3],!torch.qint8>
%4549 = torch.aten.dequantize.self %4548 : !torch.vtensor<[256,1024,3,3],!torch.qint8> -> !torch.vtensor<[256,1024,3,3],f32>
%4550 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4551 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1457 = torch.constant.int 12
%4552 = torch.aten.item %4550 : !torch.vtensor<[],f32> -> !torch.float
%4553 = torch.aten.item %4551 : !torch.vtensor<[],si8> -> !torch.int
%4554 = torch.aten.quantize_per_tensor %137, %4552, %4553, %int12_1457 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4555 = torch.aten.int_repr %4554 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%4556 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4557 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4558 = torch.aten.item %4556 : !torch.vtensor<[],f32> -> !torch.float
%4559 = torch.aten.item %4557 : !torch.vtensor<[],si8> -> !torch.int
%4560 = torch.aten._make_per_tensor_quantized_tensor %4555, %4558, %4559 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%4561 = torch.aten.dequantize.self %4560 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_1458 = torch.constant.int 1
%int1_1459 = torch.constant.int 1
%int1_1460 = torch.constant.int 1
%int1_1461 = torch.constant.int 1
%int1_1462 = torch.constant.int 1
%int1_1463 = torch.constant.int 1
%int0_1464 = torch.constant.int 0
%4562 = torch.prim.ListConstruct %int1_1458, %int1_1459 : (!torch.int, !torch.int) -> !torch.list<int>
%4563 = torch.prim.ListConstruct %int1_1460, %int1_1461 : (!torch.int, !torch.int) -> !torch.list<int>
%4564 = torch.prim.ListConstruct %int1_1462, %int1_1463 : (!torch.int, !torch.int) -> !torch.list<int>
%4565 = torch.prim.ListConstruct %int0_1464, %int0_1464 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1465 = torch.constant.bool false
%int1_1466 = torch.constant.int 1
%4566 = torch.aten.convolution %4537, %4549, %4561, %4564, %4562, %4563, %false_1465, %4565, %int1_1466 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[256,1024,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,40,40],f32>
%4567 = torch.aten.relu %4566 : !torch.vtensor<[1,256,40,40],f32> -> !torch.vtensor<[1,256,40,40],f32>
%4568 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4569 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1467 = torch.constant.int 12
%4570 = torch.aten.item %4568 : !torch.vtensor<[],f32> -> !torch.float
%4571 = torch.aten.item %4569 : !torch.vtensor<[],si8> -> !torch.int
%4572 = torch.aten.quantize_per_tensor %4567, %4570, %4571, %int12_1467 : !torch.vtensor<[1,256,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%4573 = torch.aten.int_repr %4572 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],si8>
%4574 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4575 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4576 = torch.aten.item %4574 : !torch.vtensor<[],f32> -> !torch.float
%4577 = torch.aten.item %4575 : !torch.vtensor<[],si8> -> !torch.int
%4578 = torch.aten._make_per_tensor_quantized_tensor %4573, %4576, %4577 : !torch.vtensor<[1,256,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%4579 = torch.aten.dequantize.self %4578 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],f32>
%4580 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4581 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1468 = torch.constant.int 12
%4582 = torch.aten.item %4580 : !torch.vtensor<[],f32> -> !torch.float
%4583 = torch.aten.item %4581 : !torch.vtensor<[],si8> -> !torch.int
%4584 = torch.aten.quantize_per_tensor %138, %4582, %4583, %int12_1468 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4585 = torch.aten.int_repr %4584 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%4586 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4587 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4588 = torch.aten.item %4586 : !torch.vtensor<[],f32> -> !torch.float
%4589 = torch.aten.item %4587 : !torch.vtensor<[],si8> -> !torch.int
%4590 = torch.aten._make_per_tensor_quantized_tensor %4585, %4588, %4589 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4591 = torch.aten.dequantize.self %4590 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%4592 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4593 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1469 = torch.constant.int 12
%4594 = torch.aten.item %4592 : !torch.vtensor<[],f32> -> !torch.float
%4595 = torch.aten.item %4593 : !torch.vtensor<[],si8> -> !torch.int
%4596 = torch.aten.quantize_per_tensor %139, %4594, %4595, %int12_1469 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4597 = torch.aten.int_repr %4596 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4598 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4599 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4600 = torch.aten.item %4598 : !torch.vtensor<[],f32> -> !torch.float
%4601 = torch.aten.item %4599 : !torch.vtensor<[],si8> -> !torch.int
%4602 = torch.aten._make_per_tensor_quantized_tensor %4597, %4600, %4601 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4603 = torch.aten.dequantize.self %4602 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1470 = torch.constant.int 1
%int1_1471 = torch.constant.int 1
%int1_1472 = torch.constant.int 1
%int1_1473 = torch.constant.int 1
%int1_1474 = torch.constant.int 1
%int1_1475 = torch.constant.int 1
%int0_1476 = torch.constant.int 0
%4604 = torch.prim.ListConstruct %int1_1470, %int1_1471 : (!torch.int, !torch.int) -> !torch.list<int>
%4605 = torch.prim.ListConstruct %int1_1472, %int1_1473 : (!torch.int, !torch.int) -> !torch.list<int>
%4606 = torch.prim.ListConstruct %int1_1474, %int1_1475 : (!torch.int, !torch.int) -> !torch.list<int>
%4607 = torch.prim.ListConstruct %int0_1476, %int0_1476 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1477 = torch.constant.bool false
%int1_1478 = torch.constant.int 1
%4608 = torch.aten.convolution %4579, %4591, %4603, %4606, %4604, %4605, %false_1477, %4607, %int1_1478 : !torch.vtensor<[1,256,40,40],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,40,40],f32>
%4609 = torch.aten.relu %4608 : !torch.vtensor<[1,128,40,40],f32> -> !torch.vtensor<[1,128,40,40],f32>
%4610 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4611 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1479 = torch.constant.int 12
%4612 = torch.aten.item %4610 : !torch.vtensor<[],f32> -> !torch.float
%4613 = torch.aten.item %4611 : !torch.vtensor<[],si8> -> !torch.int
%4614 = torch.aten.quantize_per_tensor %4609, %4612, %4613, %int12_1479 : !torch.vtensor<[1,128,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,40,40],!torch.qint8>
%4615 = torch.aten.int_repr %4614 : !torch.vtensor<[1,128,40,40],!torch.qint8> -> !torch.vtensor<[1,128,40,40],si8>
%4616 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4617 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4618 = torch.aten.item %4616 : !torch.vtensor<[],f32> -> !torch.float
%4619 = torch.aten.item %4617 : !torch.vtensor<[],si8> -> !torch.int
%4620 = torch.aten._make_per_tensor_quantized_tensor %4615, %4618, %4619 : !torch.vtensor<[1,128,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,40,40],!torch.qint8>
%4621 = torch.aten.dequantize.self %4620 : !torch.vtensor<[1,128,40,40],!torch.qint8> -> !torch.vtensor<[1,128,40,40],f32>
%int2_1480 = torch.constant.int 2
%int2_1481 = torch.constant.int 2
%4622 = torch.prim.ListConstruct %int2_1480, %int2_1481 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1482 = torch.constant.int 0
%int0_1483 = torch.constant.int 0
%4623 = torch.prim.ListConstruct %int0_1482, %int0_1483 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1484 = torch.constant.int 2
%int2_1485 = torch.constant.int 2
%4624 = torch.prim.ListConstruct %int2_1484, %int2_1485 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1486 = torch.constant.int 1
%int1_1487 = torch.constant.int 1
%4625 = torch.prim.ListConstruct %int1_1486, %int1_1487 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1488 = torch.constant.bool true
%4626 = torch.aten.max_pool2d %4621, %4622, %4624, %4623, %4625, %true_1488 : !torch.vtensor<[1,128,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,128,20,20],f32>
%4627 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4628 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1489 = torch.constant.int 12
%4629 = torch.aten.item %4627 : !torch.vtensor<[],f32> -> !torch.float
%4630 = torch.aten.item %4628 : !torch.vtensor<[],si8> -> !torch.int
%4631 = torch.aten.quantize_per_tensor %4626, %4629, %4630, %int12_1489 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4632 = torch.aten.int_repr %4631 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%4633 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4634 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4635 = torch.aten.item %4633 : !torch.vtensor<[],f32> -> !torch.float
%4636 = torch.aten.item %4634 : !torch.vtensor<[],si8> -> !torch.int
%4637 = torch.aten._make_per_tensor_quantized_tensor %4632, %4635, %4636 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4638 = torch.aten.dequantize.self %4637 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%4639 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4640 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1490 = torch.constant.int 12
%4641 = torch.aten.item %4639 : !torch.vtensor<[],f32> -> !torch.float
%4642 = torch.aten.item %4640 : !torch.vtensor<[],si8> -> !torch.int
%4643 = torch.aten.quantize_per_tensor %140, %4641, %4642, %int12_1490 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4644 = torch.aten.int_repr %4643 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%4645 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4647 = torch.aten.item %4645 : !torch.vtensor<[],f32> -> !torch.float
%4648 = torch.aten.item %4646 : !torch.vtensor<[],si8> -> !torch.int
%4649 = torch.aten._make_per_tensor_quantized_tensor %4644, %4647, %4648 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4650 = torch.aten.dequantize.self %4649 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%4651 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1491 = torch.constant.int 12
%4653 = torch.aten.item %4651 : !torch.vtensor<[],f32> -> !torch.float
%4654 = torch.aten.item %4652 : !torch.vtensor<[],si8> -> !torch.int
%4655 = torch.aten.quantize_per_tensor %141, %4653, %4654, %int12_1491 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4656 = torch.aten.int_repr %4655 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4657 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4659 = torch.aten.item %4657 : !torch.vtensor<[],f32> -> !torch.float
%4660 = torch.aten.item %4658 : !torch.vtensor<[],si8> -> !torch.int
%4661 = torch.aten._make_per_tensor_quantized_tensor %4656, %4659, %4660 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4662 = torch.aten.dequantize.self %4661 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1492 = torch.constant.int 1
%int1_1493 = torch.constant.int 1
%int1_1494 = torch.constant.int 1
%int1_1495 = torch.constant.int 1
%int1_1496 = torch.constant.int 1
%int1_1497 = torch.constant.int 1
%int0_1498 = torch.constant.int 0
%4663 = torch.prim.ListConstruct %int1_1492, %int1_1493 : (!torch.int, !torch.int) -> !torch.list<int>
%4664 = torch.prim.ListConstruct %int1_1494, %int1_1495 : (!torch.int, !torch.int) -> !torch.list<int>
%4665 = torch.prim.ListConstruct %int1_1496, %int1_1497 : (!torch.int, !torch.int) -> !torch.list<int>
%4666 = torch.prim.ListConstruct %int0_1498, %int0_1498 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1499 = torch.constant.bool false
%int1_1500 = torch.constant.int 1
%4667 = torch.aten.convolution %4638, %4650, %4662, %4665, %4663, %4664, %false_1499, %4666, %int1_1500 : !torch.vtensor<[1,128,20,20],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,20,20],f32>
%4668 = torch.aten.relu %4667 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[1,128,20,20],f32>
%4669 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4670 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1501 = torch.constant.int 12
%4671 = torch.aten.item %4669 : !torch.vtensor<[],f32> -> !torch.float
%4672 = torch.aten.item %4670 : !torch.vtensor<[],si8> -> !torch.int
%4673 = torch.aten.quantize_per_tensor %4668, %4671, %4672, %int12_1501 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4674 = torch.aten.int_repr %4673 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%4675 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4677 = torch.aten.item %4675 : !torch.vtensor<[],f32> -> !torch.float
%4678 = torch.aten.item %4676 : !torch.vtensor<[],si8> -> !torch.int
%4679 = torch.aten._make_per_tensor_quantized_tensor %4674, %4677, %4678 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4680 = torch.aten.dequantize.self %4679 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%int2_1502 = torch.constant.int 2
%int2_1503 = torch.constant.int 2
%4681 = torch.prim.ListConstruct %int2_1502, %int2_1503 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1504 = torch.constant.int 0
%int0_1505 = torch.constant.int 0
%4682 = torch.prim.ListConstruct %int0_1504, %int0_1505 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1506 = torch.constant.int 2
%int2_1507 = torch.constant.int 2
%4683 = torch.prim.ListConstruct %int2_1506, %int2_1507 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1508 = torch.constant.int 1
%int1_1509 = torch.constant.int 1
%4684 = torch.prim.ListConstruct %int1_1508, %int1_1509 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1510 = torch.constant.bool true
%4685 = torch.aten.max_pool2d %4680, %4681, %4683, %4682, %4684, %true_1510 : !torch.vtensor<[1,128,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,128,10,10],f32>
%4686 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4687 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1511 = torch.constant.int 12
%4688 = torch.aten.item %4686 : !torch.vtensor<[],f32> -> !torch.float
%4689 = torch.aten.item %4687 : !torch.vtensor<[],si8> -> !torch.int
%4690 = torch.aten.quantize_per_tensor %4685, %4688, %4689, %int12_1511 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4691 = torch.aten.int_repr %4690 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%4692 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4693 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4694 = torch.aten.item %4692 : !torch.vtensor<[],f32> -> !torch.float
%4695 = torch.aten.item %4693 : !torch.vtensor<[],si8> -> !torch.int
%4696 = torch.aten._make_per_tensor_quantized_tensor %4691, %4694, %4695 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4697 = torch.aten.dequantize.self %4696 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%4698 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4699 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1512 = torch.constant.int 12
%4700 = torch.aten.item %4698 : !torch.vtensor<[],f32> -> !torch.float
%4701 = torch.aten.item %4699 : !torch.vtensor<[],si8> -> !torch.int
%4702 = torch.aten.quantize_per_tensor %142, %4700, %4701, %int12_1512 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4703 = torch.aten.int_repr %4702 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%4704 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4705 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4706 = torch.aten.item %4704 : !torch.vtensor<[],f32> -> !torch.float
%4707 = torch.aten.item %4705 : !torch.vtensor<[],si8> -> !torch.int
%4708 = torch.aten._make_per_tensor_quantized_tensor %4703, %4706, %4707 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4709 = torch.aten.dequantize.self %4708 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%4710 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1513 = torch.constant.int 12
%4712 = torch.aten.item %4710 : !torch.vtensor<[],f32> -> !torch.float
%4713 = torch.aten.item %4711 : !torch.vtensor<[],si8> -> !torch.int
%4714 = torch.aten.quantize_per_tensor %143, %4712, %4713, %int12_1513 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4715 = torch.aten.int_repr %4714 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4716 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4717 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4718 = torch.aten.item %4716 : !torch.vtensor<[],f32> -> !torch.float
%4719 = torch.aten.item %4717 : !torch.vtensor<[],si8> -> !torch.int
%4720 = torch.aten._make_per_tensor_quantized_tensor %4715, %4718, %4719 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4721 = torch.aten.dequantize.self %4720 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1514 = torch.constant.int 1
%int1_1515 = torch.constant.int 1
%int1_1516 = torch.constant.int 1
%int1_1517 = torch.constant.int 1
%int1_1518 = torch.constant.int 1
%int1_1519 = torch.constant.int 1
%int0_1520 = torch.constant.int 0
%4722 = torch.prim.ListConstruct %int1_1514, %int1_1515 : (!torch.int, !torch.int) -> !torch.list<int>
%4723 = torch.prim.ListConstruct %int1_1516, %int1_1517 : (!torch.int, !torch.int) -> !torch.list<int>
%4724 = torch.prim.ListConstruct %int1_1518, %int1_1519 : (!torch.int, !torch.int) -> !torch.list<int>
%4725 = torch.prim.ListConstruct %int0_1520, %int0_1520 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1521 = torch.constant.bool false
%int1_1522 = torch.constant.int 1
%4726 = torch.aten.convolution %4697, %4709, %4721, %4724, %4722, %4723, %false_1521, %4725, %int1_1522 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%4727 = torch.aten.relu %4726 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%4728 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1523 = torch.constant.int 12
%4730 = torch.aten.item %4728 : !torch.vtensor<[],f32> -> !torch.float
%4731 = torch.aten.item %4729 : !torch.vtensor<[],si8> -> !torch.int
%4732 = torch.aten.quantize_per_tensor %4727, %4730, %4731, %int12_1523 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4733 = torch.aten.int_repr %4732 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%4734 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4736 = torch.aten.item %4734 : !torch.vtensor<[],f32> -> !torch.float
%4737 = torch.aten.item %4735 : !torch.vtensor<[],si8> -> !torch.int
%4738 = torch.aten._make_per_tensor_quantized_tensor %4733, %4736, %4737 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4739 = torch.aten.dequantize.self %4738 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%4740 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1524 = torch.constant.int 12
%4742 = torch.aten.item %4740 : !torch.vtensor<[],f32> -> !torch.float
%4743 = torch.aten.item %4741 : !torch.vtensor<[],si8> -> !torch.int
%4744 = torch.aten.quantize_per_tensor %144, %4742, %4743, %int12_1524 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4745 = torch.aten.int_repr %4744 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%4746 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4748 = torch.aten.item %4746 : !torch.vtensor<[],f32> -> !torch.float
%4749 = torch.aten.item %4747 : !torch.vtensor<[],si8> -> !torch.int
%4750 = torch.aten._make_per_tensor_quantized_tensor %4745, %4748, %4749 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%4751 = torch.aten.dequantize.self %4750 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%4752 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1525 = torch.constant.int 12
%4754 = torch.aten.item %4752 : !torch.vtensor<[],f32> -> !torch.float
%4755 = torch.aten.item %4753 : !torch.vtensor<[],si8> -> !torch.int
%4756 = torch.aten.quantize_per_tensor %145, %4754, %4755, %int12_1525 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4757 = torch.aten.int_repr %4756 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4758 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4759 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4760 = torch.aten.item %4758 : !torch.vtensor<[],f32> -> !torch.float
%4761 = torch.aten.item %4759 : !torch.vtensor<[],si8> -> !torch.int
%4762 = torch.aten._make_per_tensor_quantized_tensor %4757, %4760, %4761 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4763 = torch.aten.dequantize.self %4762 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int2_1526 = torch.constant.int 2
%int2_1527 = torch.constant.int 2
%int2_1528 = torch.constant.int 2
%int2_1529 = torch.constant.int 2
%int1_1530 = torch.constant.int 1
%int1_1531 = torch.constant.int 1
%int0_1532 = torch.constant.int 0
%4764 = torch.prim.ListConstruct %int2_1526, %int2_1527 : (!torch.int, !torch.int) -> !torch.list<int>
%4765 = torch.prim.ListConstruct %int2_1528, %int2_1529 : (!torch.int, !torch.int) -> !torch.list<int>
%4766 = torch.prim.ListConstruct %int1_1530, %int1_1531 : (!torch.int, !torch.int) -> !torch.list<int>
%4767 = torch.prim.ListConstruct %int0_1532, %int0_1532 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1533 = torch.constant.bool false
%int1_1534 = torch.constant.int 1
%4768 = torch.aten.convolution %4739, %4751, %4763, %4766, %4764, %4765, %false_1533, %4767, %int1_1534 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%4769 = torch.aten.relu %4768 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%4770 = torch.prim.ListConstruct %4769, %4739 : (!torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[1,128,10,10],f32>) -> !torch.list<vtensor>
%int1_1535 = torch.constant.int 1
%4771 = torch.aten.cat %4770, %int1_1535 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,256,10,10],f32>
%4772 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4773 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1536 = torch.constant.int 12
%4774 = torch.aten.item %4772 : !torch.vtensor<[],f32> -> !torch.float
%4775 = torch.aten.item %4773 : !torch.vtensor<[],si8> -> !torch.int
%4776 = torch.aten.quantize_per_tensor %4771, %4774, %4775, %int12_1536 : !torch.vtensor<[1,256,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%4777 = torch.aten.int_repr %4776 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],si8>
%4778 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4779 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4780 = torch.aten.item %4778 : !torch.vtensor<[],f32> -> !torch.float
%4781 = torch.aten.item %4779 : !torch.vtensor<[],si8> -> !torch.int
%4782 = torch.aten._make_per_tensor_quantized_tensor %4777, %4780, %4781 : !torch.vtensor<[1,256,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,10,10],!torch.qint8>
%4783 = torch.aten.dequantize.self %4782 : !torch.vtensor<[1,256,10,10],!torch.qint8> -> !torch.vtensor<[1,256,10,10],f32>
%4784 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4785 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1537 = torch.constant.int 12
%4786 = torch.aten.item %4784 : !torch.vtensor<[],f32> -> !torch.float
%4787 = torch.aten.item %4785 : !torch.vtensor<[],si8> -> !torch.int
%4788 = torch.aten.quantize_per_tensor %146, %4786, %4787, %int12_1537 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4789 = torch.aten.int_repr %4788 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%4790 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4791 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4792 = torch.aten.item %4790 : !torch.vtensor<[],f32> -> !torch.float
%4793 = torch.aten.item %4791 : !torch.vtensor<[],si8> -> !torch.int
%4794 = torch.aten._make_per_tensor_quantized_tensor %4789, %4792, %4793 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4795 = torch.aten.dequantize.self %4794 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%4796 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4797 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1538 = torch.constant.int 12
%4798 = torch.aten.item %4796 : !torch.vtensor<[],f32> -> !torch.float
%4799 = torch.aten.item %4797 : !torch.vtensor<[],si8> -> !torch.int
%4800 = torch.aten.quantize_per_tensor %147, %4798, %4799, %int12_1538 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4801 = torch.aten.int_repr %4800 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4802 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4803 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4804 = torch.aten.item %4802 : !torch.vtensor<[],f32> -> !torch.float
%4805 = torch.aten.item %4803 : !torch.vtensor<[],si8> -> !torch.int
%4806 = torch.aten._make_per_tensor_quantized_tensor %4801, %4804, %4805 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4807 = torch.aten.dequantize.self %4806 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1539 = torch.constant.int 1
%int1_1540 = torch.constant.int 1
%int1_1541 = torch.constant.int 1
%int1_1542 = torch.constant.int 1
%int1_1543 = torch.constant.int 1
%int1_1544 = torch.constant.int 1
%int0_1545 = torch.constant.int 0
%4808 = torch.prim.ListConstruct %int1_1539, %int1_1540 : (!torch.int, !torch.int) -> !torch.list<int>
%4809 = torch.prim.ListConstruct %int1_1541, %int1_1542 : (!torch.int, !torch.int) -> !torch.list<int>
%4810 = torch.prim.ListConstruct %int1_1543, %int1_1544 : (!torch.int, !torch.int) -> !torch.list<int>
%4811 = torch.prim.ListConstruct %int0_1545, %int0_1545 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1546 = torch.constant.bool false
%int1_1547 = torch.constant.int 1
%4812 = torch.aten.convolution %4783, %4795, %4807, %4810, %4808, %4809, %false_1546, %4811, %int1_1547 : !torch.vtensor<[1,256,10,10],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%4813 = torch.aten.relu %4812 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[1,128,10,10],f32>
%4814 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4815 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1548 = torch.constant.int 12
%4816 = torch.aten.item %4814 : !torch.vtensor<[],f32> -> !torch.float
%4817 = torch.aten.item %4815 : !torch.vtensor<[],si8> -> !torch.int
%4818 = torch.aten.quantize_per_tensor %4813, %4816, %4817, %int12_1548 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4819 = torch.aten.int_repr %4818 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%4820 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4821 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4822 = torch.aten.item %4820 : !torch.vtensor<[],f32> -> !torch.float
%4823 = torch.aten.item %4821 : !torch.vtensor<[],si8> -> !torch.int
%4824 = torch.aten._make_per_tensor_quantized_tensor %4819, %4822, %4823 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%4825 = torch.aten.dequantize.self %4824 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%4826 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%4827 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%4828 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1549 = torch.constant.int 0
%int0_1550 = torch.constant.int 0
%int0_1551 = torch.constant.int 0
%4829 = torch.aten.select.int %4828, %int0_1549, %int0_1551 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4830 = torch.aten.item %4829 : !torch.vtensor<[1],si64> -> !torch.int
%4831 = torch.aten.lt.int %4830, %int0_1549 : !torch.int, !torch.int -> !torch.bool
%4832 = torch.aten.Int.bool %4831 : !torch.bool -> !torch.int
%4833 = torch.aten.mul.int %4832, %int0_1550 : !torch.int, !torch.int -> !torch.int
%4834 = torch.aten.add.int %4830, %4833 : !torch.int, !torch.int -> !torch.int
%4835 = torch.prim.ListConstruct %4834 : (!torch.int) -> !torch.list<int>
%false_1552 = torch.constant.bool false
%none_1553 = torch.constant.none
%4836 = torch.aten.tensor %4835, %none_1553, %none_1553, %false_1552 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1554, %indices_1555 = torch.aten.sort %4836, %int0_1549, %false_1552 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1556 = torch.constant.int 0
%4837 = torch.aten.select.int %values_1554, %int0_1549, %int0_1556 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4838 = torch.aten.item %4837 : !torch.vtensor<[1],si64> -> !torch.int
%4839 = torch.aten.unsqueeze %4826, %4838 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4840 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1557 = torch.constant.int 0
%int0_1558 = torch.constant.int 0
%int0_1559 = torch.constant.int 0
%4841 = torch.aten.select.int %4840, %int0_1557, %int0_1559 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4842 = torch.aten.item %4841 : !torch.vtensor<[1],si64> -> !torch.int
%4843 = torch.aten.lt.int %4842, %int0_1557 : !torch.int, !torch.int -> !torch.bool
%4844 = torch.aten.Int.bool %4843 : !torch.bool -> !torch.int
%4845 = torch.aten.mul.int %4844, %int0_1558 : !torch.int, !torch.int -> !torch.int
%4846 = torch.aten.add.int %4842, %4845 : !torch.int, !torch.int -> !torch.int
%4847 = torch.prim.ListConstruct %4846 : (!torch.int) -> !torch.list<int>
%false_1560 = torch.constant.bool false
%none_1561 = torch.constant.none
%4848 = torch.aten.tensor %4847, %none_1561, %none_1561, %false_1560 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1562, %indices_1563 = torch.aten.sort %4848, %int0_1557, %false_1560 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1564 = torch.constant.int 0
%4849 = torch.aten.select.int %values_1562, %int0_1557, %int0_1564 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4850 = torch.aten.item %4849 : !torch.vtensor<[1],si64> -> !torch.int
%4851 = torch.aten.unsqueeze %4827, %4850 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4852 = torch.prim.ListConstruct %4839, %4851 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1565 = torch.constant.int 0
%4853 = torch.aten.cat %4852, %int0_1565 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4854 = torch.aten._shape_as_tensor %4825 : !torch.vtensor<[1,128,10,10],f32> -> !torch.vtensor<[4],si64>
%4855 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4856 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4857 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1566 = torch.constant.none
%int1_1567 = torch.constant.int 1
%4858 = torch.prim.ListConstruct %int1_1567 : (!torch.int) -> !torch.list<int>
%4859 = torch.aten.ones %4858, %none_1566, %none_1566, %none_1566, %none_1566 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1568 = torch.constant.int 0
%int0_1569 = torch.constant.int 0
%4860 = torch.prim.NumToTensor.Scalar %int0_1569 : !torch.int -> !torch.vtensor<[1],si64>
%4861 = torch.aten.index_select %4856, %int0_1568, %4860 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4862 = torch.aten.item %4861 : !torch.vtensor<[1],si64> -> !torch.int
%4863 = torch.aten.index_select %4857, %int0_1568, %4860 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4864 = torch.aten.item %4863 : !torch.vtensor<[1],si64> -> !torch.int
%4865 = torch.aten.index_select %4855, %int0_1568, %4860 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4866 = torch.aten.item %4865 : !torch.vtensor<[1],si64> -> !torch.int
%4867 = torch.aten.index_select %4859, %int0_1568, %4860 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4868 = torch.aten.item %4867 : !torch.vtensor<[1],si64> -> !torch.int
%4869 = torch.aten.slice.Tensor %4854, %4866, %4862, %4864, %4868 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1570 = torch.constant.int 4
%none_1571 = torch.constant.none
%false_1572 = torch.constant.bool false
%4870 = torch.aten.to.dtype %4853, %int4_1570, %false_1572, %false_1572, %none_1571 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4871 = torch.prim.ListConstruct %4869, %4870 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1573 = torch.constant.int 0
%4872 = torch.aten.cat %4871, %int0_1573 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4873 = torch.operator "onnx.Resize"(%4825, %none, %none, %4872) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4874 = torch.prim.ListConstruct %4873, %4680 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,128,20,20],f32>) -> !torch.list<vtensor>
%int1_1574 = torch.constant.int 1
%4875 = torch.aten.cat %4874, %int1_1574 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%4876 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4877 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1575 = torch.constant.int 12
%4878 = torch.aten.item %4876 : !torch.vtensor<[],f32> -> !torch.float
%4879 = torch.aten.item %4877 : !torch.vtensor<[],si8> -> !torch.int
%4880 = torch.aten.quantize_per_tensor %4875, %4878, %4879, %int12_1575 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%4881 = torch.aten.int_repr %4880 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%4882 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4883 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4884 = torch.aten.item %4882 : !torch.vtensor<[],f32> -> !torch.float
%4885 = torch.aten.item %4883 : !torch.vtensor<[],si8> -> !torch.int
%4886 = torch.aten._make_per_tensor_quantized_tensor %4881, %4884, %4885 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%4887 = torch.aten.dequantize.self %4886 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%4888 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4889 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1576 = torch.constant.int 12
%4890 = torch.aten.item %4888 : !torch.vtensor<[],f32> -> !torch.float
%4891 = torch.aten.item %4889 : !torch.vtensor<[],si8> -> !torch.int
%4892 = torch.aten.quantize_per_tensor %148, %4890, %4891, %int12_1576 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4893 = torch.aten.int_repr %4892 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%4894 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%4895 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4896 = torch.aten.item %4894 : !torch.vtensor<[],f32> -> !torch.float
%4897 = torch.aten.item %4895 : !torch.vtensor<[],si8> -> !torch.int
%4898 = torch.aten._make_per_tensor_quantized_tensor %4893, %4896, %4897 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%4899 = torch.aten.dequantize.self %4898 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%4900 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4901 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1577 = torch.constant.int 12
%4902 = torch.aten.item %4900 : !torch.vtensor<[],f32> -> !torch.float
%4903 = torch.aten.item %4901 : !torch.vtensor<[],si8> -> !torch.int
%4904 = torch.aten.quantize_per_tensor %149, %4902, %4903, %int12_1577 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4905 = torch.aten.int_repr %4904 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%4906 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4907 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4908 = torch.aten.item %4906 : !torch.vtensor<[],f32> -> !torch.float
%4909 = torch.aten.item %4907 : !torch.vtensor<[],si8> -> !torch.int
%4910 = torch.aten._make_per_tensor_quantized_tensor %4905, %4908, %4909 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%4911 = torch.aten.dequantize.self %4910 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1578 = torch.constant.int 1
%int1_1579 = torch.constant.int 1
%int1_1580 = torch.constant.int 1
%int1_1581 = torch.constant.int 1
%int1_1582 = torch.constant.int 1
%int1_1583 = torch.constant.int 1
%int0_1584 = torch.constant.int 0
%4912 = torch.prim.ListConstruct %int1_1578, %int1_1579 : (!torch.int, !torch.int) -> !torch.list<int>
%4913 = torch.prim.ListConstruct %int1_1580, %int1_1581 : (!torch.int, !torch.int) -> !torch.list<int>
%4914 = torch.prim.ListConstruct %int1_1582, %int1_1583 : (!torch.int, !torch.int) -> !torch.list<int>
%4915 = torch.prim.ListConstruct %int0_1584, %int0_1584 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1585 = torch.constant.bool false
%int1_1586 = torch.constant.int 1
%4916 = torch.aten.convolution %4887, %4899, %4911, %4914, %4912, %4913, %false_1585, %4915, %int1_1586 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,20,20],f32>
%4917 = torch.aten.relu %4916 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[1,128,20,20],f32>
%4918 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4919 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1587 = torch.constant.int 12
%4920 = torch.aten.item %4918 : !torch.vtensor<[],f32> -> !torch.float
%4921 = torch.aten.item %4919 : !torch.vtensor<[],si8> -> !torch.int
%4922 = torch.aten.quantize_per_tensor %4917, %4920, %4921, %int12_1587 : !torch.vtensor<[1,128,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4923 = torch.aten.int_repr %4922 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],si8>
%4924 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4925 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4926 = torch.aten.item %4924 : !torch.vtensor<[],f32> -> !torch.float
%4927 = torch.aten.item %4925 : !torch.vtensor<[],si8> -> !torch.int
%4928 = torch.aten._make_per_tensor_quantized_tensor %4923, %4926, %4927 : !torch.vtensor<[1,128,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,20,20],!torch.qint8>
%4929 = torch.aten.dequantize.self %4928 : !torch.vtensor<[1,128,20,20],!torch.qint8> -> !torch.vtensor<[1,128,20,20],f32>
%4930 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%4931 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%4932 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1588 = torch.constant.int 0
%int0_1589 = torch.constant.int 0
%int0_1590 = torch.constant.int 0
%4933 = torch.aten.select.int %4932, %int0_1588, %int0_1590 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4934 = torch.aten.item %4933 : !torch.vtensor<[1],si64> -> !torch.int
%4935 = torch.aten.lt.int %4934, %int0_1588 : !torch.int, !torch.int -> !torch.bool
%4936 = torch.aten.Int.bool %4935 : !torch.bool -> !torch.int
%4937 = torch.aten.mul.int %4936, %int0_1589 : !torch.int, !torch.int -> !torch.int
%4938 = torch.aten.add.int %4934, %4937 : !torch.int, !torch.int -> !torch.int
%4939 = torch.prim.ListConstruct %4938 : (!torch.int) -> !torch.list<int>
%false_1591 = torch.constant.bool false
%none_1592 = torch.constant.none
%4940 = torch.aten.tensor %4939, %none_1592, %none_1592, %false_1591 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1593, %indices_1594 = torch.aten.sort %4940, %int0_1588, %false_1591 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1595 = torch.constant.int 0
%4941 = torch.aten.select.int %values_1593, %int0_1588, %int0_1595 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4942 = torch.aten.item %4941 : !torch.vtensor<[1],si64> -> !torch.int
%4943 = torch.aten.unsqueeze %4930, %4942 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4944 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1596 = torch.constant.int 0
%int0_1597 = torch.constant.int 0
%int0_1598 = torch.constant.int 0
%4945 = torch.aten.select.int %4944, %int0_1596, %int0_1598 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4946 = torch.aten.item %4945 : !torch.vtensor<[1],si64> -> !torch.int
%4947 = torch.aten.lt.int %4946, %int0_1596 : !torch.int, !torch.int -> !torch.bool
%4948 = torch.aten.Int.bool %4947 : !torch.bool -> !torch.int
%4949 = torch.aten.mul.int %4948, %int0_1597 : !torch.int, !torch.int -> !torch.int
%4950 = torch.aten.add.int %4946, %4949 : !torch.int, !torch.int -> !torch.int
%4951 = torch.prim.ListConstruct %4950 : (!torch.int) -> !torch.list<int>
%false_1599 = torch.constant.bool false
%none_1600 = torch.constant.none
%4952 = torch.aten.tensor %4951, %none_1600, %none_1600, %false_1599 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1601, %indices_1602 = torch.aten.sort %4952, %int0_1596, %false_1599 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1603 = torch.constant.int 0
%4953 = torch.aten.select.int %values_1601, %int0_1596, %int0_1603 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4954 = torch.aten.item %4953 : !torch.vtensor<[1],si64> -> !torch.int
%4955 = torch.aten.unsqueeze %4931, %4954 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4956 = torch.prim.ListConstruct %4943, %4955 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1604 = torch.constant.int 0
%4957 = torch.aten.cat %4956, %int0_1604 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4958 = torch.aten._shape_as_tensor %4929 : !torch.vtensor<[1,128,20,20],f32> -> !torch.vtensor<[4],si64>
%4959 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4960 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4961 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1605 = torch.constant.none
%int1_1606 = torch.constant.int 1
%4962 = torch.prim.ListConstruct %int1_1606 : (!torch.int) -> !torch.list<int>
%4963 = torch.aten.ones %4962, %none_1605, %none_1605, %none_1605, %none_1605 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1607 = torch.constant.int 0
%int0_1608 = torch.constant.int 0
%4964 = torch.prim.NumToTensor.Scalar %int0_1608 : !torch.int -> !torch.vtensor<[1],si64>
%4965 = torch.aten.index_select %4960, %int0_1607, %4964 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4966 = torch.aten.item %4965 : !torch.vtensor<[1],si64> -> !torch.int
%4967 = torch.aten.index_select %4961, %int0_1607, %4964 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4968 = torch.aten.item %4967 : !torch.vtensor<[1],si64> -> !torch.int
%4969 = torch.aten.index_select %4959, %int0_1607, %4964 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4970 = torch.aten.item %4969 : !torch.vtensor<[1],si64> -> !torch.int
%4971 = torch.aten.index_select %4963, %int0_1607, %4964 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4972 = torch.aten.item %4971 : !torch.vtensor<[1],si64> -> !torch.int
%4973 = torch.aten.slice.Tensor %4958, %4970, %4966, %4968, %4972 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1609 = torch.constant.int 4
%none_1610 = torch.constant.none
%false_1611 = torch.constant.bool false
%4974 = torch.aten.to.dtype %4957, %int4_1609, %false_1611, %false_1611, %none_1610 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4975 = torch.prim.ListConstruct %4973, %4974 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1612 = torch.constant.int 0
%4976 = torch.aten.cat %4975, %int0_1612 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4977 = torch.operator "onnx.Resize"(%4929, %none, %none, %4976) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4978 = torch.prim.ListConstruct %4977, %4621 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,128,40,40],f32>) -> !torch.list<vtensor>
%int1_1613 = torch.constant.int 1
%4979 = torch.aten.cat %4978, %int1_1613 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%4980 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4981 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1614 = torch.constant.int 12
%4982 = torch.aten.item %4980 : !torch.vtensor<[],f32> -> !torch.float
%4983 = torch.aten.item %4981 : !torch.vtensor<[],si8> -> !torch.int
%4984 = torch.aten.quantize_per_tensor %4979, %4982, %4983, %int12_1614 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%4985 = torch.aten.int_repr %4984 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%4986 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4987 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4988 = torch.aten.item %4986 : !torch.vtensor<[],f32> -> !torch.float
%4989 = torch.aten.item %4987 : !torch.vtensor<[],si8> -> !torch.int
%4990 = torch.aten._make_per_tensor_quantized_tensor %4985, %4988, %4989 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%4991 = torch.aten.dequantize.self %4990 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%4992 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1615 = torch.constant.int 12
%4994 = torch.aten.item %4992 : !torch.vtensor<[],f32> -> !torch.float
%4995 = torch.aten.item %4993 : !torch.vtensor<[],si8> -> !torch.int
%4996 = torch.aten.quantize_per_tensor %150, %4994, %4995, %int12_1615 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%4997 = torch.aten.int_repr %4996 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%4998 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4999 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5000 = torch.aten.item %4998 : !torch.vtensor<[],f32> -> !torch.float
%5001 = torch.aten.item %4999 : !torch.vtensor<[],si8> -> !torch.int
%5002 = torch.aten._make_per_tensor_quantized_tensor %4997, %5000, %5001 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%5003 = torch.aten.dequantize.self %5002 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%5004 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5005 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1616 = torch.constant.int 12
%5006 = torch.aten.item %5004 : !torch.vtensor<[],f32> -> !torch.float
%5007 = torch.aten.item %5005 : !torch.vtensor<[],si8> -> !torch.int
%5008 = torch.aten.quantize_per_tensor %151, %5006, %5007, %int12_1616 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%5009 = torch.aten.int_repr %5008 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%5010 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5011 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5012 = torch.aten.item %5010 : !torch.vtensor<[],f32> -> !torch.float
%5013 = torch.aten.item %5011 : !torch.vtensor<[],si8> -> !torch.int
%5014 = torch.aten._make_per_tensor_quantized_tensor %5009, %5012, %5013 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%5015 = torch.aten.dequantize.self %5014 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_1617 = torch.constant.int 1
%int1_1618 = torch.constant.int 1
%int1_1619 = torch.constant.int 1
%int1_1620 = torch.constant.int 1
%int1_1621 = torch.constant.int 1
%int1_1622 = torch.constant.int 1
%int0_1623 = torch.constant.int 0
%5016 = torch.prim.ListConstruct %int1_1617, %int1_1618 : (!torch.int, !torch.int) -> !torch.list<int>
%5017 = torch.prim.ListConstruct %int1_1619, %int1_1620 : (!torch.int, !torch.int) -> !torch.list<int>
%5018 = torch.prim.ListConstruct %int1_1621, %int1_1622 : (!torch.int, !torch.int) -> !torch.list<int>
%5019 = torch.prim.ListConstruct %int0_1623, %int0_1623 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1624 = torch.constant.bool false
%int1_1625 = torch.constant.int 1
%5020 = torch.aten.convolution %4991, %5003, %5015, %5018, %5016, %5017, %false_1624, %5019, %int1_1625 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,40,40],f32>
%5021 = torch.aten.relu %5020 : !torch.vtensor<[1,256,40,40],f32> -> !torch.vtensor<[1,256,40,40],f32>
%5022 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5023 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1626 = torch.constant.int 12
%5024 = torch.aten.item %5022 : !torch.vtensor<[],f32> -> !torch.float
%5025 = torch.aten.item %5023 : !torch.vtensor<[],si8> -> !torch.int
%5026 = torch.aten.quantize_per_tensor %5021, %5024, %5025, %int12_1626 : !torch.vtensor<[1,256,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%5027 = torch.aten.int_repr %5026 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],si8>
%5028 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5029 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5030 = torch.aten.item %5028 : !torch.vtensor<[],f32> -> !torch.float
%5031 = torch.aten.item %5029 : !torch.vtensor<[],si8> -> !torch.int
%5032 = torch.aten._make_per_tensor_quantized_tensor %5027, %5030, %5031 : !torch.vtensor<[1,256,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%5033 = torch.aten.dequantize.self %5032 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],f32>
%int1_1627 = torch.constant.int 1
%5034 = torch.aten.add.Tensor %5033, %4579, %int1_1627 : !torch.vtensor<[1,256,40,40],f32>, !torch.vtensor<[1,256,40,40],f32>, !torch.int -> !torch.vtensor<[1,256,40,40],f32>
%5035 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5036 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1628 = torch.constant.int 12
%5037 = torch.aten.item %5035 : !torch.vtensor<[],f32> -> !torch.float
%5038 = torch.aten.item %5036 : !torch.vtensor<[],si8> -> !torch.int
%5039 = torch.aten.quantize_per_tensor %5034, %5037, %5038, %int12_1628 : !torch.vtensor<[1,256,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%5040 = torch.aten.int_repr %5039 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],si8>
%5041 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5042 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5043 = torch.aten.item %5041 : !torch.vtensor<[],f32> -> !torch.float
%5044 = torch.aten.item %5042 : !torch.vtensor<[],si8> -> !torch.int
%5045 = torch.aten._make_per_tensor_quantized_tensor %5040, %5043, %5044 : !torch.vtensor<[1,256,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,40,40],!torch.qint8>
%5046 = torch.aten.dequantize.self %5045 : !torch.vtensor<[1,256,40,40],!torch.qint8> -> !torch.vtensor<[1,256,40,40],f32>
%5047 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%5048 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%5049 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1629 = torch.constant.int 0
%int0_1630 = torch.constant.int 0
%int0_1631 = torch.constant.int 0
%5050 = torch.aten.select.int %5049, %int0_1629, %int0_1631 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5051 = torch.aten.item %5050 : !torch.vtensor<[1],si64> -> !torch.int
%5052 = torch.aten.lt.int %5051, %int0_1629 : !torch.int, !torch.int -> !torch.bool
%5053 = torch.aten.Int.bool %5052 : !torch.bool -> !torch.int
%5054 = torch.aten.mul.int %5053, %int0_1630 : !torch.int, !torch.int -> !torch.int
%5055 = torch.aten.add.int %5051, %5054 : !torch.int, !torch.int -> !torch.int
%5056 = torch.prim.ListConstruct %5055 : (!torch.int) -> !torch.list<int>
%false_1632 = torch.constant.bool false
%none_1633 = torch.constant.none
%5057 = torch.aten.tensor %5056, %none_1633, %none_1633, %false_1632 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1634, %indices_1635 = torch.aten.sort %5057, %int0_1629, %false_1632 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1636 = torch.constant.int 0
%5058 = torch.aten.select.int %values_1634, %int0_1629, %int0_1636 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5059 = torch.aten.item %5058 : !torch.vtensor<[1],si64> -> !torch.int
%5060 = torch.aten.unsqueeze %5047, %5059 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5061 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1637 = torch.constant.int 0
%int0_1638 = torch.constant.int 0
%int0_1639 = torch.constant.int 0
%5062 = torch.aten.select.int %5061, %int0_1637, %int0_1639 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5063 = torch.aten.item %5062 : !torch.vtensor<[1],si64> -> !torch.int
%5064 = torch.aten.lt.int %5063, %int0_1637 : !torch.int, !torch.int -> !torch.bool
%5065 = torch.aten.Int.bool %5064 : !torch.bool -> !torch.int
%5066 = torch.aten.mul.int %5065, %int0_1638 : !torch.int, !torch.int -> !torch.int
%5067 = torch.aten.add.int %5063, %5066 : !torch.int, !torch.int -> !torch.int
%5068 = torch.prim.ListConstruct %5067 : (!torch.int) -> !torch.list<int>
%false_1640 = torch.constant.bool false
%none_1641 = torch.constant.none
%5069 = torch.aten.tensor %5068, %none_1641, %none_1641, %false_1640 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1642, %indices_1643 = torch.aten.sort %5069, %int0_1637, %false_1640 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1644 = torch.constant.int 0
%5070 = torch.aten.select.int %values_1642, %int0_1637, %int0_1644 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5071 = torch.aten.item %5070 : !torch.vtensor<[1],si64> -> !torch.int
%5072 = torch.aten.unsqueeze %5048, %5071 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5073 = torch.prim.ListConstruct %5060, %5072 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1645 = torch.constant.int 0
%5074 = torch.aten.cat %5073, %int0_1645 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%5075 = torch.aten._shape_as_tensor %5046 : !torch.vtensor<[1,256,40,40],f32> -> !torch.vtensor<[4],si64>
%5076 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5077 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5078 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1646 = torch.constant.none
%int1_1647 = torch.constant.int 1
%5079 = torch.prim.ListConstruct %int1_1647 : (!torch.int) -> !torch.list<int>
%5080 = torch.aten.ones %5079, %none_1646, %none_1646, %none_1646, %none_1646 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1648 = torch.constant.int 0
%int0_1649 = torch.constant.int 0
%5081 = torch.prim.NumToTensor.Scalar %int0_1649 : !torch.int -> !torch.vtensor<[1],si64>
%5082 = torch.aten.index_select %5077, %int0_1648, %5081 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5083 = torch.aten.item %5082 : !torch.vtensor<[1],si64> -> !torch.int
%5084 = torch.aten.index_select %5078, %int0_1648, %5081 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5085 = torch.aten.item %5084 : !torch.vtensor<[1],si64> -> !torch.int
%5086 = torch.aten.index_select %5076, %int0_1648, %5081 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5087 = torch.aten.item %5086 : !torch.vtensor<[1],si64> -> !torch.int
%5088 = torch.aten.index_select %5080, %int0_1648, %5081 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5089 = torch.aten.item %5088 : !torch.vtensor<[1],si64> -> !torch.int
%5090 = torch.aten.slice.Tensor %5075, %5087, %5083, %5085, %5089 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1650 = torch.constant.int 4
%none_1651 = torch.constant.none
%false_1652 = torch.constant.bool false
%5091 = torch.aten.to.dtype %5074, %int4_1650, %false_1652, %false_1652, %none_1651 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%5092 = torch.prim.ListConstruct %5090, %5091 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1653 = torch.constant.int 0
%5093 = torch.aten.cat %5092, %int0_1653 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%5094 = torch.operator "onnx.Resize"(%5046, %none, %none, %5093) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,256,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%5095 = torch.prim.ListConstruct %5094, %2788 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,256,80,80],f32>) -> !torch.list<vtensor>
%int1_1654 = torch.constant.int 1
%5096 = torch.aten.cat %5095, %int1_1654 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%5097 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5098 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1655 = torch.constant.int 12
%5099 = torch.aten.item %5097 : !torch.vtensor<[],f32> -> !torch.float
%5100 = torch.aten.item %5098 : !torch.vtensor<[],si8> -> !torch.int
%5101 = torch.aten.quantize_per_tensor %5096, %5099, %5100, %int12_1655 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%5102 = torch.aten.int_repr %5101 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%5103 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5105 = torch.aten.item %5103 : !torch.vtensor<[],f32> -> !torch.float
%5106 = torch.aten.item %5104 : !torch.vtensor<[],si8> -> !torch.int
%5107 = torch.aten._make_per_tensor_quantized_tensor %5102, %5105, %5106 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%5108 = torch.aten.dequantize.self %5107 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%5109 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1656 = torch.constant.int 12
%5111 = torch.aten.item %5109 : !torch.vtensor<[],f32> -> !torch.float
%5112 = torch.aten.item %5110 : !torch.vtensor<[],si8> -> !torch.int
%5113 = torch.aten.quantize_per_tensor %152, %5111, %5112, %int12_1656 : !torch.vtensor<[128,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,3,3],!torch.qint8>
%5114 = torch.aten.int_repr %5113 : !torch.vtensor<[128,512,3,3],!torch.qint8> -> !torch.vtensor<[128,512,3,3],si8>
%5115 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5117 = torch.aten.item %5115 : !torch.vtensor<[],f32> -> !torch.float
%5118 = torch.aten.item %5116 : !torch.vtensor<[],si8> -> !torch.int
%5119 = torch.aten._make_per_tensor_quantized_tensor %5114, %5117, %5118 : !torch.vtensor<[128,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,3,3],!torch.qint8>
%5120 = torch.aten.dequantize.self %5119 : !torch.vtensor<[128,512,3,3],!torch.qint8> -> !torch.vtensor<[128,512,3,3],f32>
%5121 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1657 = torch.constant.int 12
%5123 = torch.aten.item %5121 : !torch.vtensor<[],f32> -> !torch.float
%5124 = torch.aten.item %5122 : !torch.vtensor<[],si8> -> !torch.int
%5125 = torch.aten.quantize_per_tensor %153, %5123, %5124, %int12_1657 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%5126 = torch.aten.int_repr %5125 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%5127 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5129 = torch.aten.item %5127 : !torch.vtensor<[],f32> -> !torch.float
%5130 = torch.aten.item %5128 : !torch.vtensor<[],si8> -> !torch.int
%5131 = torch.aten._make_per_tensor_quantized_tensor %5126, %5129, %5130 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%5132 = torch.aten.dequantize.self %5131 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1658 = torch.constant.int 1
%int1_1659 = torch.constant.int 1
%int1_1660 = torch.constant.int 1
%int1_1661 = torch.constant.int 1
%int1_1662 = torch.constant.int 1
%int1_1663 = torch.constant.int 1
%int0_1664 = torch.constant.int 0
%5133 = torch.prim.ListConstruct %int1_1658, %int1_1659 : (!torch.int, !torch.int) -> !torch.list<int>
%5134 = torch.prim.ListConstruct %int1_1660, %int1_1661 : (!torch.int, !torch.int) -> !torch.list<int>
%5135 = torch.prim.ListConstruct %int1_1662, %int1_1663 : (!torch.int, !torch.int) -> !torch.list<int>
%5136 = torch.prim.ListConstruct %int0_1664, %int0_1664 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1665 = torch.constant.bool false
%int1_1666 = torch.constant.int 1
%5137 = torch.aten.convolution %5108, %5120, %5132, %5135, %5133, %5134, %false_1665, %5136, %int1_1666 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[128,512,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,80,80],f32>
%5138 = torch.aten.relu %5137 : !torch.vtensor<[1,128,80,80],f32> -> !torch.vtensor<[1,128,80,80],f32>
%5139 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5140 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1667 = torch.constant.int 12
%5141 = torch.aten.item %5139 : !torch.vtensor<[],f32> -> !torch.float
%5142 = torch.aten.item %5140 : !torch.vtensor<[],si8> -> !torch.int
%5143 = torch.aten.quantize_per_tensor %5138, %5141, %5142, %int12_1667 : !torch.vtensor<[1,128,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5144 = torch.aten.int_repr %5143 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],si8>
%5145 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5147 = torch.aten.item %5145 : !torch.vtensor<[],f32> -> !torch.float
%5148 = torch.aten.item %5146 : !torch.vtensor<[],si8> -> !torch.int
%5149 = torch.aten._make_per_tensor_quantized_tensor %5144, %5147, %5148 : !torch.vtensor<[1,128,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5150 = torch.aten.dequantize.self %5149 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],f32>
%5151 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1668 = torch.constant.int 12
%5153 = torch.aten.item %5151 : !torch.vtensor<[],f32> -> !torch.float
%5154 = torch.aten.item %5152 : !torch.vtensor<[],si8> -> !torch.int
%5155 = torch.aten.quantize_per_tensor %154, %5153, %5154, %int12_1668 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5156 = torch.aten.int_repr %5155 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%5157 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5159 = torch.aten.item %5157 : !torch.vtensor<[],f32> -> !torch.float
%5160 = torch.aten.item %5158 : !torch.vtensor<[],si8> -> !torch.int
%5161 = torch.aten._make_per_tensor_quantized_tensor %5156, %5159, %5160 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5162 = torch.aten.dequantize.self %5161 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%5163 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1669 = torch.constant.int 12
%5165 = torch.aten.item %5163 : !torch.vtensor<[],f32> -> !torch.float
%5166 = torch.aten.item %5164 : !torch.vtensor<[],si8> -> !torch.int
%5167 = torch.aten.quantize_per_tensor %155, %5165, %5166, %int12_1669 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5168 = torch.aten.int_repr %5167 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5169 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5171 = torch.aten.item %5169 : !torch.vtensor<[],f32> -> !torch.float
%5172 = torch.aten.item %5170 : !torch.vtensor<[],si8> -> !torch.int
%5173 = torch.aten._make_per_tensor_quantized_tensor %5168, %5171, %5172 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5174 = torch.aten.dequantize.self %5173 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1670 = torch.constant.int 1
%int1_1671 = torch.constant.int 1
%int1_1672 = torch.constant.int 1
%int1_1673 = torch.constant.int 1
%int1_1674 = torch.constant.int 1
%int1_1675 = torch.constant.int 1
%int0_1676 = torch.constant.int 0
%5175 = torch.prim.ListConstruct %int1_1670, %int1_1671 : (!torch.int, !torch.int) -> !torch.list<int>
%5176 = torch.prim.ListConstruct %int1_1672, %int1_1673 : (!torch.int, !torch.int) -> !torch.list<int>
%5177 = torch.prim.ListConstruct %int1_1674, %int1_1675 : (!torch.int, !torch.int) -> !torch.list<int>
%5178 = torch.prim.ListConstruct %int0_1676, %int0_1676 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1677 = torch.constant.bool false
%int1_1678 = torch.constant.int 1
%5179 = torch.aten.convolution %5150, %5162, %5174, %5177, %5175, %5176, %false_1677, %5178, %int1_1678 : !torch.vtensor<[1,128,80,80],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,80,80],f32>
%5180 = torch.aten.relu %5179 : !torch.vtensor<[1,64,80,80],f32> -> !torch.vtensor<[1,64,80,80],f32>
%5181 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1679 = torch.constant.int 12
%5183 = torch.aten.item %5181 : !torch.vtensor<[],f32> -> !torch.float
%5184 = torch.aten.item %5182 : !torch.vtensor<[],si8> -> !torch.int
%5185 = torch.aten.quantize_per_tensor %5180, %5183, %5184, %int12_1679 : !torch.vtensor<[1,64,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,80,80],!torch.qint8>
%5186 = torch.aten.int_repr %5185 : !torch.vtensor<[1,64,80,80],!torch.qint8> -> !torch.vtensor<[1,64,80,80],si8>
%5187 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5188 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5189 = torch.aten.item %5187 : !torch.vtensor<[],f32> -> !torch.float
%5190 = torch.aten.item %5188 : !torch.vtensor<[],si8> -> !torch.int
%5191 = torch.aten._make_per_tensor_quantized_tensor %5186, %5189, %5190 : !torch.vtensor<[1,64,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,80,80],!torch.qint8>
%5192 = torch.aten.dequantize.self %5191 : !torch.vtensor<[1,64,80,80],!torch.qint8> -> !torch.vtensor<[1,64,80,80],f32>
%int2_1680 = torch.constant.int 2
%int2_1681 = torch.constant.int 2
%5193 = torch.prim.ListConstruct %int2_1680, %int2_1681 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1682 = torch.constant.int 0
%int0_1683 = torch.constant.int 0
%5194 = torch.prim.ListConstruct %int0_1682, %int0_1683 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1684 = torch.constant.int 2
%int2_1685 = torch.constant.int 2
%5195 = torch.prim.ListConstruct %int2_1684, %int2_1685 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1686 = torch.constant.int 1
%int1_1687 = torch.constant.int 1
%5196 = torch.prim.ListConstruct %int1_1686, %int1_1687 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1688 = torch.constant.bool true
%5197 = torch.aten.max_pool2d %5192, %5193, %5195, %5194, %5196, %true_1688 : !torch.vtensor<[1,64,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,40,40],f32>
%5198 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5199 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1689 = torch.constant.int 12
%5200 = torch.aten.item %5198 : !torch.vtensor<[],f32> -> !torch.float
%5201 = torch.aten.item %5199 : !torch.vtensor<[],si8> -> !torch.int
%5202 = torch.aten.quantize_per_tensor %5197, %5200, %5201, %int12_1689 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5203 = torch.aten.int_repr %5202 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%5204 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5205 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5206 = torch.aten.item %5204 : !torch.vtensor<[],f32> -> !torch.float
%5207 = torch.aten.item %5205 : !torch.vtensor<[],si8> -> !torch.int
%5208 = torch.aten._make_per_tensor_quantized_tensor %5203, %5206, %5207 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5209 = torch.aten.dequantize.self %5208 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%5210 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1690 = torch.constant.int 12
%5212 = torch.aten.item %5210 : !torch.vtensor<[],f32> -> !torch.float
%5213 = torch.aten.item %5211 : !torch.vtensor<[],si8> -> !torch.int
%5214 = torch.aten.quantize_per_tensor %156, %5212, %5213, %int12_1690 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5215 = torch.aten.int_repr %5214 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%5216 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5218 = torch.aten.item %5216 : !torch.vtensor<[],f32> -> !torch.float
%5219 = torch.aten.item %5217 : !torch.vtensor<[],si8> -> !torch.int
%5220 = torch.aten._make_per_tensor_quantized_tensor %5215, %5218, %5219 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5221 = torch.aten.dequantize.self %5220 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%5222 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5223 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1691 = torch.constant.int 12
%5224 = torch.aten.item %5222 : !torch.vtensor<[],f32> -> !torch.float
%5225 = torch.aten.item %5223 : !torch.vtensor<[],si8> -> !torch.int
%5226 = torch.aten.quantize_per_tensor %157, %5224, %5225, %int12_1691 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5227 = torch.aten.int_repr %5226 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5228 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5230 = torch.aten.item %5228 : !torch.vtensor<[],f32> -> !torch.float
%5231 = torch.aten.item %5229 : !torch.vtensor<[],si8> -> !torch.int
%5232 = torch.aten._make_per_tensor_quantized_tensor %5227, %5230, %5231 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5233 = torch.aten.dequantize.self %5232 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1692 = torch.constant.int 1
%int1_1693 = torch.constant.int 1
%int1_1694 = torch.constant.int 1
%int1_1695 = torch.constant.int 1
%int1_1696 = torch.constant.int 1
%int1_1697 = torch.constant.int 1
%int0_1698 = torch.constant.int 0
%5234 = torch.prim.ListConstruct %int1_1692, %int1_1693 : (!torch.int, !torch.int) -> !torch.list<int>
%5235 = torch.prim.ListConstruct %int1_1694, %int1_1695 : (!torch.int, !torch.int) -> !torch.list<int>
%5236 = torch.prim.ListConstruct %int1_1696, %int1_1697 : (!torch.int, !torch.int) -> !torch.list<int>
%5237 = torch.prim.ListConstruct %int0_1698, %int0_1698 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1699 = torch.constant.bool false
%int1_1700 = torch.constant.int 1
%5238 = torch.aten.convolution %5209, %5221, %5233, %5236, %5234, %5235, %false_1699, %5237, %int1_1700 : !torch.vtensor<[1,64,40,40],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,40,40],f32>
%5239 = torch.aten.relu %5238 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[1,64,40,40],f32>
%5240 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5241 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1701 = torch.constant.int 12
%5242 = torch.aten.item %5240 : !torch.vtensor<[],f32> -> !torch.float
%5243 = torch.aten.item %5241 : !torch.vtensor<[],si8> -> !torch.int
%5244 = torch.aten.quantize_per_tensor %5239, %5242, %5243, %int12_1701 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5245 = torch.aten.int_repr %5244 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%5246 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5247 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5248 = torch.aten.item %5246 : !torch.vtensor<[],f32> -> !torch.float
%5249 = torch.aten.item %5247 : !torch.vtensor<[],si8> -> !torch.int
%5250 = torch.aten._make_per_tensor_quantized_tensor %5245, %5248, %5249 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5251 = torch.aten.dequantize.self %5250 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%int2_1702 = torch.constant.int 2
%int2_1703 = torch.constant.int 2
%5252 = torch.prim.ListConstruct %int2_1702, %int2_1703 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1704 = torch.constant.int 0
%int0_1705 = torch.constant.int 0
%5253 = torch.prim.ListConstruct %int0_1704, %int0_1705 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1706 = torch.constant.int 2
%int2_1707 = torch.constant.int 2
%5254 = torch.prim.ListConstruct %int2_1706, %int2_1707 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1708 = torch.constant.int 1
%int1_1709 = torch.constant.int 1
%5255 = torch.prim.ListConstruct %int1_1708, %int1_1709 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1710 = torch.constant.bool true
%5256 = torch.aten.max_pool2d %5251, %5252, %5254, %5253, %5255, %true_1710 : !torch.vtensor<[1,64,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,20,20],f32>
%5257 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5258 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1711 = torch.constant.int 12
%5259 = torch.aten.item %5257 : !torch.vtensor<[],f32> -> !torch.float
%5260 = torch.aten.item %5258 : !torch.vtensor<[],si8> -> !torch.int
%5261 = torch.aten.quantize_per_tensor %5256, %5259, %5260, %int12_1711 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5262 = torch.aten.int_repr %5261 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%5263 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5264 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5265 = torch.aten.item %5263 : !torch.vtensor<[],f32> -> !torch.float
%5266 = torch.aten.item %5264 : !torch.vtensor<[],si8> -> !torch.int
%5267 = torch.aten._make_per_tensor_quantized_tensor %5262, %5265, %5266 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5268 = torch.aten.dequantize.self %5267 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%5269 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5270 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1712 = torch.constant.int 12
%5271 = torch.aten.item %5269 : !torch.vtensor<[],f32> -> !torch.float
%5272 = torch.aten.item %5270 : !torch.vtensor<[],si8> -> !torch.int
%5273 = torch.aten.quantize_per_tensor %158, %5271, %5272, %int12_1712 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5274 = torch.aten.int_repr %5273 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%5275 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5276 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5277 = torch.aten.item %5275 : !torch.vtensor<[],f32> -> !torch.float
%5278 = torch.aten.item %5276 : !torch.vtensor<[],si8> -> !torch.int
%5279 = torch.aten._make_per_tensor_quantized_tensor %5274, %5277, %5278 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5280 = torch.aten.dequantize.self %5279 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%5281 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5282 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1713 = torch.constant.int 12
%5283 = torch.aten.item %5281 : !torch.vtensor<[],f32> -> !torch.float
%5284 = torch.aten.item %5282 : !torch.vtensor<[],si8> -> !torch.int
%5285 = torch.aten.quantize_per_tensor %159, %5283, %5284, %int12_1713 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5286 = torch.aten.int_repr %5285 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5287 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5288 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5289 = torch.aten.item %5287 : !torch.vtensor<[],f32> -> !torch.float
%5290 = torch.aten.item %5288 : !torch.vtensor<[],si8> -> !torch.int
%5291 = torch.aten._make_per_tensor_quantized_tensor %5286, %5289, %5290 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5292 = torch.aten.dequantize.self %5291 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1714 = torch.constant.int 1
%int1_1715 = torch.constant.int 1
%int1_1716 = torch.constant.int 1
%int1_1717 = torch.constant.int 1
%int1_1718 = torch.constant.int 1
%int1_1719 = torch.constant.int 1
%int0_1720 = torch.constant.int 0
%5293 = torch.prim.ListConstruct %int1_1714, %int1_1715 : (!torch.int, !torch.int) -> !torch.list<int>
%5294 = torch.prim.ListConstruct %int1_1716, %int1_1717 : (!torch.int, !torch.int) -> !torch.list<int>
%5295 = torch.prim.ListConstruct %int1_1718, %int1_1719 : (!torch.int, !torch.int) -> !torch.list<int>
%5296 = torch.prim.ListConstruct %int0_1720, %int0_1720 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1721 = torch.constant.bool false
%int1_1722 = torch.constant.int 1
%5297 = torch.aten.convolution %5268, %5280, %5292, %5295, %5293, %5294, %false_1721, %5296, %int1_1722 : !torch.vtensor<[1,64,20,20],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,20,20],f32>
%5298 = torch.aten.relu %5297 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[1,64,20,20],f32>
%5299 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5300 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1723 = torch.constant.int 12
%5301 = torch.aten.item %5299 : !torch.vtensor<[],f32> -> !torch.float
%5302 = torch.aten.item %5300 : !torch.vtensor<[],si8> -> !torch.int
%5303 = torch.aten.quantize_per_tensor %5298, %5301, %5302, %int12_1723 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5304 = torch.aten.int_repr %5303 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%5305 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5306 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5307 = torch.aten.item %5305 : !torch.vtensor<[],f32> -> !torch.float
%5308 = torch.aten.item %5306 : !torch.vtensor<[],si8> -> !torch.int
%5309 = torch.aten._make_per_tensor_quantized_tensor %5304, %5307, %5308 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5310 = torch.aten.dequantize.self %5309 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%int2_1724 = torch.constant.int 2
%int2_1725 = torch.constant.int 2
%5311 = torch.prim.ListConstruct %int2_1724, %int2_1725 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1726 = torch.constant.int 0
%int0_1727 = torch.constant.int 0
%5312 = torch.prim.ListConstruct %int0_1726, %int0_1727 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1728 = torch.constant.int 2
%int2_1729 = torch.constant.int 2
%5313 = torch.prim.ListConstruct %int2_1728, %int2_1729 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1730 = torch.constant.int 1
%int1_1731 = torch.constant.int 1
%5314 = torch.prim.ListConstruct %int1_1730, %int1_1731 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1732 = torch.constant.bool true
%5315 = torch.aten.max_pool2d %5310, %5311, %5313, %5312, %5314, %true_1732 : !torch.vtensor<[1,64,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,10,10],f32>
%5316 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5317 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1733 = torch.constant.int 12
%5318 = torch.aten.item %5316 : !torch.vtensor<[],f32> -> !torch.float
%5319 = torch.aten.item %5317 : !torch.vtensor<[],si8> -> !torch.int
%5320 = torch.aten.quantize_per_tensor %5315, %5318, %5319, %int12_1733 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5321 = torch.aten.int_repr %5320 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%5322 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5324 = torch.aten.item %5322 : !torch.vtensor<[],f32> -> !torch.float
%5325 = torch.aten.item %5323 : !torch.vtensor<[],si8> -> !torch.int
%5326 = torch.aten._make_per_tensor_quantized_tensor %5321, %5324, %5325 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5327 = torch.aten.dequantize.self %5326 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%5328 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5329 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1734 = torch.constant.int 12
%5330 = torch.aten.item %5328 : !torch.vtensor<[],f32> -> !torch.float
%5331 = torch.aten.item %5329 : !torch.vtensor<[],si8> -> !torch.int
%5332 = torch.aten.quantize_per_tensor %160, %5330, %5331, %int12_1734 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5333 = torch.aten.int_repr %5332 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%5334 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5335 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5336 = torch.aten.item %5334 : !torch.vtensor<[],f32> -> !torch.float
%5337 = torch.aten.item %5335 : !torch.vtensor<[],si8> -> !torch.int
%5338 = torch.aten._make_per_tensor_quantized_tensor %5333, %5336, %5337 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5339 = torch.aten.dequantize.self %5338 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%5340 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5341 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1735 = torch.constant.int 12
%5342 = torch.aten.item %5340 : !torch.vtensor<[],f32> -> !torch.float
%5343 = torch.aten.item %5341 : !torch.vtensor<[],si8> -> !torch.int
%5344 = torch.aten.quantize_per_tensor %161, %5342, %5343, %int12_1735 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5345 = torch.aten.int_repr %5344 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5346 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5347 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5348 = torch.aten.item %5346 : !torch.vtensor<[],f32> -> !torch.float
%5349 = torch.aten.item %5347 : !torch.vtensor<[],si8> -> !torch.int
%5350 = torch.aten._make_per_tensor_quantized_tensor %5345, %5348, %5349 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5351 = torch.aten.dequantize.self %5350 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1736 = torch.constant.int 1
%int1_1737 = torch.constant.int 1
%int1_1738 = torch.constant.int 1
%int1_1739 = torch.constant.int 1
%int1_1740 = torch.constant.int 1
%int1_1741 = torch.constant.int 1
%int0_1742 = torch.constant.int 0
%5352 = torch.prim.ListConstruct %int1_1736, %int1_1737 : (!torch.int, !torch.int) -> !torch.list<int>
%5353 = torch.prim.ListConstruct %int1_1738, %int1_1739 : (!torch.int, !torch.int) -> !torch.list<int>
%5354 = torch.prim.ListConstruct %int1_1740, %int1_1741 : (!torch.int, !torch.int) -> !torch.list<int>
%5355 = torch.prim.ListConstruct %int0_1742, %int0_1742 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1743 = torch.constant.bool false
%int1_1744 = torch.constant.int 1
%5356 = torch.aten.convolution %5327, %5339, %5351, %5354, %5352, %5353, %false_1743, %5355, %int1_1744 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%5357 = torch.aten.relu %5356 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%5358 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5359 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1745 = torch.constant.int 12
%5360 = torch.aten.item %5358 : !torch.vtensor<[],f32> -> !torch.float
%5361 = torch.aten.item %5359 : !torch.vtensor<[],si8> -> !torch.int
%5362 = torch.aten.quantize_per_tensor %5357, %5360, %5361, %int12_1745 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5363 = torch.aten.int_repr %5362 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%5364 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5365 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5366 = torch.aten.item %5364 : !torch.vtensor<[],f32> -> !torch.float
%5367 = torch.aten.item %5365 : !torch.vtensor<[],si8> -> !torch.int
%5368 = torch.aten._make_per_tensor_quantized_tensor %5363, %5366, %5367 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5369 = torch.aten.dequantize.self %5368 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%5370 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5371 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1746 = torch.constant.int 12
%5372 = torch.aten.item %5370 : !torch.vtensor<[],f32> -> !torch.float
%5373 = torch.aten.item %5371 : !torch.vtensor<[],si8> -> !torch.int
%5374 = torch.aten.quantize_per_tensor %162, %5372, %5373, %int12_1746 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5375 = torch.aten.int_repr %5374 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%5376 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5377 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5378 = torch.aten.item %5376 : !torch.vtensor<[],f32> -> !torch.float
%5379 = torch.aten.item %5377 : !torch.vtensor<[],si8> -> !torch.int
%5380 = torch.aten._make_per_tensor_quantized_tensor %5375, %5378, %5379 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%5381 = torch.aten.dequantize.self %5380 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%5382 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5383 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1747 = torch.constant.int 12
%5384 = torch.aten.item %5382 : !torch.vtensor<[],f32> -> !torch.float
%5385 = torch.aten.item %5383 : !torch.vtensor<[],si8> -> !torch.int
%5386 = torch.aten.quantize_per_tensor %163, %5384, %5385, %int12_1747 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5387 = torch.aten.int_repr %5386 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5388 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5389 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5390 = torch.aten.item %5388 : !torch.vtensor<[],f32> -> !torch.float
%5391 = torch.aten.item %5389 : !torch.vtensor<[],si8> -> !torch.int
%5392 = torch.aten._make_per_tensor_quantized_tensor %5387, %5390, %5391 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5393 = torch.aten.dequantize.self %5392 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int2_1748 = torch.constant.int 2
%int2_1749 = torch.constant.int 2
%int2_1750 = torch.constant.int 2
%int2_1751 = torch.constant.int 2
%int1_1752 = torch.constant.int 1
%int1_1753 = torch.constant.int 1
%int0_1754 = torch.constant.int 0
%5394 = torch.prim.ListConstruct %int2_1748, %int2_1749 : (!torch.int, !torch.int) -> !torch.list<int>
%5395 = torch.prim.ListConstruct %int2_1750, %int2_1751 : (!torch.int, !torch.int) -> !torch.list<int>
%5396 = torch.prim.ListConstruct %int1_1752, %int1_1753 : (!torch.int, !torch.int) -> !torch.list<int>
%5397 = torch.prim.ListConstruct %int0_1754, %int0_1754 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1755 = torch.constant.bool false
%int1_1756 = torch.constant.int 1
%5398 = torch.aten.convolution %5369, %5381, %5393, %5396, %5394, %5395, %false_1755, %5397, %int1_1756 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%5399 = torch.aten.relu %5398 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%5400 = torch.prim.ListConstruct %5399, %5369 : (!torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[1,64,10,10],f32>) -> !torch.list<vtensor>
%int1_1757 = torch.constant.int 1
%5401 = torch.aten.cat %5400, %int1_1757 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,10,10],f32>
%5402 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1758 = torch.constant.int 12
%5404 = torch.aten.item %5402 : !torch.vtensor<[],f32> -> !torch.float
%5405 = torch.aten.item %5403 : !torch.vtensor<[],si8> -> !torch.int
%5406 = torch.aten.quantize_per_tensor %5401, %5404, %5405, %int12_1758 : !torch.vtensor<[1,128,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%5407 = torch.aten.int_repr %5406 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],si8>
%5408 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5410 = torch.aten.item %5408 : !torch.vtensor<[],f32> -> !torch.float
%5411 = torch.aten.item %5409 : !torch.vtensor<[],si8> -> !torch.int
%5412 = torch.aten._make_per_tensor_quantized_tensor %5407, %5410, %5411 : !torch.vtensor<[1,128,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,10,10],!torch.qint8>
%5413 = torch.aten.dequantize.self %5412 : !torch.vtensor<[1,128,10,10],!torch.qint8> -> !torch.vtensor<[1,128,10,10],f32>
%5414 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5415 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1759 = torch.constant.int 12
%5416 = torch.aten.item %5414 : !torch.vtensor<[],f32> -> !torch.float
%5417 = torch.aten.item %5415 : !torch.vtensor<[],si8> -> !torch.int
%5418 = torch.aten.quantize_per_tensor %164, %5416, %5417, %int12_1759 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5419 = torch.aten.int_repr %5418 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%5420 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5421 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5422 = torch.aten.item %5420 : !torch.vtensor<[],f32> -> !torch.float
%5423 = torch.aten.item %5421 : !torch.vtensor<[],si8> -> !torch.int
%5424 = torch.aten._make_per_tensor_quantized_tensor %5419, %5422, %5423 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5425 = torch.aten.dequantize.self %5424 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%5426 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5427 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1760 = torch.constant.int 12
%5428 = torch.aten.item %5426 : !torch.vtensor<[],f32> -> !torch.float
%5429 = torch.aten.item %5427 : !torch.vtensor<[],si8> -> !torch.int
%5430 = torch.aten.quantize_per_tensor %165, %5428, %5429, %int12_1760 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5431 = torch.aten.int_repr %5430 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5432 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5433 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5434 = torch.aten.item %5432 : !torch.vtensor<[],f32> -> !torch.float
%5435 = torch.aten.item %5433 : !torch.vtensor<[],si8> -> !torch.int
%5436 = torch.aten._make_per_tensor_quantized_tensor %5431, %5434, %5435 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5437 = torch.aten.dequantize.self %5436 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1761 = torch.constant.int 1
%int1_1762 = torch.constant.int 1
%int1_1763 = torch.constant.int 1
%int1_1764 = torch.constant.int 1
%int1_1765 = torch.constant.int 1
%int1_1766 = torch.constant.int 1
%int0_1767 = torch.constant.int 0
%5438 = torch.prim.ListConstruct %int1_1761, %int1_1762 : (!torch.int, !torch.int) -> !torch.list<int>
%5439 = torch.prim.ListConstruct %int1_1763, %int1_1764 : (!torch.int, !torch.int) -> !torch.list<int>
%5440 = torch.prim.ListConstruct %int1_1765, %int1_1766 : (!torch.int, !torch.int) -> !torch.list<int>
%5441 = torch.prim.ListConstruct %int0_1767, %int0_1767 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1768 = torch.constant.bool false
%int1_1769 = torch.constant.int 1
%5442 = torch.aten.convolution %5413, %5425, %5437, %5440, %5438, %5439, %false_1768, %5441, %int1_1769 : !torch.vtensor<[1,128,10,10],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%5443 = torch.aten.relu %5442 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[1,64,10,10],f32>
%5444 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5445 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1770 = torch.constant.int 12
%5446 = torch.aten.item %5444 : !torch.vtensor<[],f32> -> !torch.float
%5447 = torch.aten.item %5445 : !torch.vtensor<[],si8> -> !torch.int
%5448 = torch.aten.quantize_per_tensor %5443, %5446, %5447, %int12_1770 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5449 = torch.aten.int_repr %5448 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%5450 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5451 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5452 = torch.aten.item %5450 : !torch.vtensor<[],f32> -> !torch.float
%5453 = torch.aten.item %5451 : !torch.vtensor<[],si8> -> !torch.int
%5454 = torch.aten._make_per_tensor_quantized_tensor %5449, %5452, %5453 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%5455 = torch.aten.dequantize.self %5454 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%5456 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%5457 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%5458 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1771 = torch.constant.int 0
%int0_1772 = torch.constant.int 0
%int0_1773 = torch.constant.int 0
%5459 = torch.aten.select.int %5458, %int0_1771, %int0_1773 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5460 = torch.aten.item %5459 : !torch.vtensor<[1],si64> -> !torch.int
%5461 = torch.aten.lt.int %5460, %int0_1771 : !torch.int, !torch.int -> !torch.bool
%5462 = torch.aten.Int.bool %5461 : !torch.bool -> !torch.int
%5463 = torch.aten.mul.int %5462, %int0_1772 : !torch.int, !torch.int -> !torch.int
%5464 = torch.aten.add.int %5460, %5463 : !torch.int, !torch.int -> !torch.int
%5465 = torch.prim.ListConstruct %5464 : (!torch.int) -> !torch.list<int>
%false_1774 = torch.constant.bool false
%none_1775 = torch.constant.none
%5466 = torch.aten.tensor %5465, %none_1775, %none_1775, %false_1774 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1776, %indices_1777 = torch.aten.sort %5466, %int0_1771, %false_1774 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1778 = torch.constant.int 0
%5467 = torch.aten.select.int %values_1776, %int0_1771, %int0_1778 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5468 = torch.aten.item %5467 : !torch.vtensor<[1],si64> -> !torch.int
%5469 = torch.aten.unsqueeze %5456, %5468 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5470 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1779 = torch.constant.int 0
%int0_1780 = torch.constant.int 0
%int0_1781 = torch.constant.int 0
%5471 = torch.aten.select.int %5470, %int0_1779, %int0_1781 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5472 = torch.aten.item %5471 : !torch.vtensor<[1],si64> -> !torch.int
%5473 = torch.aten.lt.int %5472, %int0_1779 : !torch.int, !torch.int -> !torch.bool
%5474 = torch.aten.Int.bool %5473 : !torch.bool -> !torch.int
%5475 = torch.aten.mul.int %5474, %int0_1780 : !torch.int, !torch.int -> !torch.int
%5476 = torch.aten.add.int %5472, %5475 : !torch.int, !torch.int -> !torch.int
%5477 = torch.prim.ListConstruct %5476 : (!torch.int) -> !torch.list<int>
%false_1782 = torch.constant.bool false
%none_1783 = torch.constant.none
%5478 = torch.aten.tensor %5477, %none_1783, %none_1783, %false_1782 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1784, %indices_1785 = torch.aten.sort %5478, %int0_1779, %false_1782 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1786 = torch.constant.int 0
%5479 = torch.aten.select.int %values_1784, %int0_1779, %int0_1786 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5480 = torch.aten.item %5479 : !torch.vtensor<[1],si64> -> !torch.int
%5481 = torch.aten.unsqueeze %5457, %5480 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5482 = torch.prim.ListConstruct %5469, %5481 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1787 = torch.constant.int 0
%5483 = torch.aten.cat %5482, %int0_1787 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%5484 = torch.aten._shape_as_tensor %5455 : !torch.vtensor<[1,64,10,10],f32> -> !torch.vtensor<[4],si64>
%5485 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5486 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5487 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1788 = torch.constant.none
%int1_1789 = torch.constant.int 1
%5488 = torch.prim.ListConstruct %int1_1789 : (!torch.int) -> !torch.list<int>
%5489 = torch.aten.ones %5488, %none_1788, %none_1788, %none_1788, %none_1788 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1790 = torch.constant.int 0
%int0_1791 = torch.constant.int 0
%5490 = torch.prim.NumToTensor.Scalar %int0_1791 : !torch.int -> !torch.vtensor<[1],si64>
%5491 = torch.aten.index_select %5486, %int0_1790, %5490 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5492 = torch.aten.item %5491 : !torch.vtensor<[1],si64> -> !torch.int
%5493 = torch.aten.index_select %5487, %int0_1790, %5490 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5494 = torch.aten.item %5493 : !torch.vtensor<[1],si64> -> !torch.int
%5495 = torch.aten.index_select %5485, %int0_1790, %5490 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5496 = torch.aten.item %5495 : !torch.vtensor<[1],si64> -> !torch.int
%5497 = torch.aten.index_select %5489, %int0_1790, %5490 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5498 = torch.aten.item %5497 : !torch.vtensor<[1],si64> -> !torch.int
%5499 = torch.aten.slice.Tensor %5484, %5496, %5492, %5494, %5498 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1792 = torch.constant.int 4
%none_1793 = torch.constant.none
%false_1794 = torch.constant.bool false
%5500 = torch.aten.to.dtype %5483, %int4_1792, %false_1794, %false_1794, %none_1793 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%5501 = torch.prim.ListConstruct %5499, %5500 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1795 = torch.constant.int 0
%5502 = torch.aten.cat %5501, %int0_1795 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%5503 = torch.operator "onnx.Resize"(%5455, %none, %none, %5502) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%5504 = torch.prim.ListConstruct %5503, %5310 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,20,20],f32>) -> !torch.list<vtensor>
%int1_1796 = torch.constant.int 1
%5505 = torch.aten.cat %5504, %int1_1796 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%5506 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5507 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1797 = torch.constant.int 12
%5508 = torch.aten.item %5506 : !torch.vtensor<[],f32> -> !torch.float
%5509 = torch.aten.item %5507 : !torch.vtensor<[],si8> -> !torch.int
%5510 = torch.aten.quantize_per_tensor %5505, %5508, %5509, %int12_1797 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%5511 = torch.aten.int_repr %5510 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%5512 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5514 = torch.aten.item %5512 : !torch.vtensor<[],f32> -> !torch.float
%5515 = torch.aten.item %5513 : !torch.vtensor<[],si8> -> !torch.int
%5516 = torch.aten._make_per_tensor_quantized_tensor %5511, %5514, %5515 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%5517 = torch.aten.dequantize.self %5516 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%5518 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1798 = torch.constant.int 12
%5520 = torch.aten.item %5518 : !torch.vtensor<[],f32> -> !torch.float
%5521 = torch.aten.item %5519 : !torch.vtensor<[],si8> -> !torch.int
%5522 = torch.aten.quantize_per_tensor %166, %5520, %5521, %int12_1798 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5523 = torch.aten.int_repr %5522 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%5524 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%5525 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5526 = torch.aten.item %5524 : !torch.vtensor<[],f32> -> !torch.float
%5527 = torch.aten.item %5525 : !torch.vtensor<[],si8> -> !torch.int
%5528 = torch.aten._make_per_tensor_quantized_tensor %5523, %5526, %5527 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5529 = torch.aten.dequantize.self %5528 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%5530 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5531 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1799 = torch.constant.int 12
%5532 = torch.aten.item %5530 : !torch.vtensor<[],f32> -> !torch.float
%5533 = torch.aten.item %5531 : !torch.vtensor<[],si8> -> !torch.int
%5534 = torch.aten.quantize_per_tensor %167, %5532, %5533, %int12_1799 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5535 = torch.aten.int_repr %5534 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5536 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5537 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5538 = torch.aten.item %5536 : !torch.vtensor<[],f32> -> !torch.float
%5539 = torch.aten.item %5537 : !torch.vtensor<[],si8> -> !torch.int
%5540 = torch.aten._make_per_tensor_quantized_tensor %5535, %5538, %5539 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5541 = torch.aten.dequantize.self %5540 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1800 = torch.constant.int 1
%int1_1801 = torch.constant.int 1
%int1_1802 = torch.constant.int 1
%int1_1803 = torch.constant.int 1
%int1_1804 = torch.constant.int 1
%int1_1805 = torch.constant.int 1
%int0_1806 = torch.constant.int 0
%5542 = torch.prim.ListConstruct %int1_1800, %int1_1801 : (!torch.int, !torch.int) -> !torch.list<int>
%5543 = torch.prim.ListConstruct %int1_1802, %int1_1803 : (!torch.int, !torch.int) -> !torch.list<int>
%5544 = torch.prim.ListConstruct %int1_1804, %int1_1805 : (!torch.int, !torch.int) -> !torch.list<int>
%5545 = torch.prim.ListConstruct %int0_1806, %int0_1806 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1807 = torch.constant.bool false
%int1_1808 = torch.constant.int 1
%5546 = torch.aten.convolution %5517, %5529, %5541, %5544, %5542, %5543, %false_1807, %5545, %int1_1808 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,20,20],f32>
%5547 = torch.aten.relu %5546 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[1,64,20,20],f32>
%5548 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5549 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1809 = torch.constant.int 12
%5550 = torch.aten.item %5548 : !torch.vtensor<[],f32> -> !torch.float
%5551 = torch.aten.item %5549 : !torch.vtensor<[],si8> -> !torch.int
%5552 = torch.aten.quantize_per_tensor %5547, %5550, %5551, %int12_1809 : !torch.vtensor<[1,64,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5553 = torch.aten.int_repr %5552 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],si8>
%5554 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5555 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5556 = torch.aten.item %5554 : !torch.vtensor<[],f32> -> !torch.float
%5557 = torch.aten.item %5555 : !torch.vtensor<[],si8> -> !torch.int
%5558 = torch.aten._make_per_tensor_quantized_tensor %5553, %5556, %5557 : !torch.vtensor<[1,64,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,20,20],!torch.qint8>
%5559 = torch.aten.dequantize.self %5558 : !torch.vtensor<[1,64,20,20],!torch.qint8> -> !torch.vtensor<[1,64,20,20],f32>
%5560 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%5561 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%5562 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1810 = torch.constant.int 0
%int0_1811 = torch.constant.int 0
%int0_1812 = torch.constant.int 0
%5563 = torch.aten.select.int %5562, %int0_1810, %int0_1812 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5564 = torch.aten.item %5563 : !torch.vtensor<[1],si64> -> !torch.int
%5565 = torch.aten.lt.int %5564, %int0_1810 : !torch.int, !torch.int -> !torch.bool
%5566 = torch.aten.Int.bool %5565 : !torch.bool -> !torch.int
%5567 = torch.aten.mul.int %5566, %int0_1811 : !torch.int, !torch.int -> !torch.int
%5568 = torch.aten.add.int %5564, %5567 : !torch.int, !torch.int -> !torch.int
%5569 = torch.prim.ListConstruct %5568 : (!torch.int) -> !torch.list<int>
%false_1813 = torch.constant.bool false
%none_1814 = torch.constant.none
%5570 = torch.aten.tensor %5569, %none_1814, %none_1814, %false_1813 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1815, %indices_1816 = torch.aten.sort %5570, %int0_1810, %false_1813 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1817 = torch.constant.int 0
%5571 = torch.aten.select.int %values_1815, %int0_1810, %int0_1817 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5572 = torch.aten.item %5571 : !torch.vtensor<[1],si64> -> !torch.int
%5573 = torch.aten.unsqueeze %5560, %5572 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5574 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1818 = torch.constant.int 0
%int0_1819 = torch.constant.int 0
%int0_1820 = torch.constant.int 0
%5575 = torch.aten.select.int %5574, %int0_1818, %int0_1820 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5576 = torch.aten.item %5575 : !torch.vtensor<[1],si64> -> !torch.int
%5577 = torch.aten.lt.int %5576, %int0_1818 : !torch.int, !torch.int -> !torch.bool
%5578 = torch.aten.Int.bool %5577 : !torch.bool -> !torch.int
%5579 = torch.aten.mul.int %5578, %int0_1819 : !torch.int, !torch.int -> !torch.int
%5580 = torch.aten.add.int %5576, %5579 : !torch.int, !torch.int -> !torch.int
%5581 = torch.prim.ListConstruct %5580 : (!torch.int) -> !torch.list<int>
%false_1821 = torch.constant.bool false
%none_1822 = torch.constant.none
%5582 = torch.aten.tensor %5581, %none_1822, %none_1822, %false_1821 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1823, %indices_1824 = torch.aten.sort %5582, %int0_1818, %false_1821 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1825 = torch.constant.int 0
%5583 = torch.aten.select.int %values_1823, %int0_1818, %int0_1825 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5584 = torch.aten.item %5583 : !torch.vtensor<[1],si64> -> !torch.int
%5585 = torch.aten.unsqueeze %5561, %5584 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5586 = torch.prim.ListConstruct %5573, %5585 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1826 = torch.constant.int 0
%5587 = torch.aten.cat %5586, %int0_1826 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%5588 = torch.aten._shape_as_tensor %5559 : !torch.vtensor<[1,64,20,20],f32> -> !torch.vtensor<[4],si64>
%5589 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5590 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5591 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1827 = torch.constant.none
%int1_1828 = torch.constant.int 1
%5592 = torch.prim.ListConstruct %int1_1828 : (!torch.int) -> !torch.list<int>
%5593 = torch.aten.ones %5592, %none_1827, %none_1827, %none_1827, %none_1827 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1829 = torch.constant.int 0
%int0_1830 = torch.constant.int 0
%5594 = torch.prim.NumToTensor.Scalar %int0_1830 : !torch.int -> !torch.vtensor<[1],si64>
%5595 = torch.aten.index_select %5590, %int0_1829, %5594 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5596 = torch.aten.item %5595 : !torch.vtensor<[1],si64> -> !torch.int
%5597 = torch.aten.index_select %5591, %int0_1829, %5594 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5598 = torch.aten.item %5597 : !torch.vtensor<[1],si64> -> !torch.int
%5599 = torch.aten.index_select %5589, %int0_1829, %5594 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5600 = torch.aten.item %5599 : !torch.vtensor<[1],si64> -> !torch.int
%5601 = torch.aten.index_select %5593, %int0_1829, %5594 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5602 = torch.aten.item %5601 : !torch.vtensor<[1],si64> -> !torch.int
%5603 = torch.aten.slice.Tensor %5588, %5600, %5596, %5598, %5602 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1831 = torch.constant.int 4
%none_1832 = torch.constant.none
%false_1833 = torch.constant.bool false
%5604 = torch.aten.to.dtype %5587, %int4_1831, %false_1833, %false_1833, %none_1832 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%5605 = torch.prim.ListConstruct %5603, %5604 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1834 = torch.constant.int 0
%5606 = torch.aten.cat %5605, %int0_1834 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%5607 = torch.operator "onnx.Resize"(%5559, %none, %none, %5606) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%5608 = torch.prim.ListConstruct %5607, %5251 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,40,40],f32>) -> !torch.list<vtensor>
%int1_1835 = torch.constant.int 1
%5609 = torch.aten.cat %5608, %int1_1835 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%5610 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5611 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1836 = torch.constant.int 12
%5612 = torch.aten.item %5610 : !torch.vtensor<[],f32> -> !torch.float
%5613 = torch.aten.item %5611 : !torch.vtensor<[],si8> -> !torch.int
%5614 = torch.aten.quantize_per_tensor %5609, %5612, %5613, %int12_1836 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%5615 = torch.aten.int_repr %5614 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%5616 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5617 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5618 = torch.aten.item %5616 : !torch.vtensor<[],f32> -> !torch.float
%5619 = torch.aten.item %5617 : !torch.vtensor<[],si8> -> !torch.int
%5620 = torch.aten._make_per_tensor_quantized_tensor %5615, %5618, %5619 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%5621 = torch.aten.dequantize.self %5620 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%5622 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5623 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1837 = torch.constant.int 12
%5624 = torch.aten.item %5622 : !torch.vtensor<[],f32> -> !torch.float
%5625 = torch.aten.item %5623 : !torch.vtensor<[],si8> -> !torch.int
%5626 = torch.aten.quantize_per_tensor %168, %5624, %5625, %int12_1837 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5627 = torch.aten.int_repr %5626 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%5628 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5629 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5630 = torch.aten.item %5628 : !torch.vtensor<[],f32> -> !torch.float
%5631 = torch.aten.item %5629 : !torch.vtensor<[],si8> -> !torch.int
%5632 = torch.aten._make_per_tensor_quantized_tensor %5627, %5630, %5631 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%5633 = torch.aten.dequantize.self %5632 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%5634 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5635 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1838 = torch.constant.int 12
%5636 = torch.aten.item %5634 : !torch.vtensor<[],f32> -> !torch.float
%5637 = torch.aten.item %5635 : !torch.vtensor<[],si8> -> !torch.int
%5638 = torch.aten.quantize_per_tensor %169, %5636, %5637, %int12_1838 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5639 = torch.aten.int_repr %5638 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5640 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5641 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5642 = torch.aten.item %5640 : !torch.vtensor<[],f32> -> !torch.float
%5643 = torch.aten.item %5641 : !torch.vtensor<[],si8> -> !torch.int
%5644 = torch.aten._make_per_tensor_quantized_tensor %5639, %5642, %5643 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5645 = torch.aten.dequantize.self %5644 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1839 = torch.constant.int 1
%int1_1840 = torch.constant.int 1
%int1_1841 = torch.constant.int 1
%int1_1842 = torch.constant.int 1
%int1_1843 = torch.constant.int 1
%int1_1844 = torch.constant.int 1
%int0_1845 = torch.constant.int 0
%5646 = torch.prim.ListConstruct %int1_1839, %int1_1840 : (!torch.int, !torch.int) -> !torch.list<int>
%5647 = torch.prim.ListConstruct %int1_1841, %int1_1842 : (!torch.int, !torch.int) -> !torch.list<int>
%5648 = torch.prim.ListConstruct %int1_1843, %int1_1844 : (!torch.int, !torch.int) -> !torch.list<int>
%5649 = torch.prim.ListConstruct %int0_1845, %int0_1845 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1846 = torch.constant.bool false
%int1_1847 = torch.constant.int 1
%5650 = torch.aten.convolution %5621, %5633, %5645, %5648, %5646, %5647, %false_1846, %5649, %int1_1847 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,40,40],f32>
%5651 = torch.aten.relu %5650 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[1,64,40,40],f32>
%5652 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5653 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1848 = torch.constant.int 12
%5654 = torch.aten.item %5652 : !torch.vtensor<[],f32> -> !torch.float
%5655 = torch.aten.item %5653 : !torch.vtensor<[],si8> -> !torch.int
%5656 = torch.aten.quantize_per_tensor %5651, %5654, %5655, %int12_1848 : !torch.vtensor<[1,64,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5657 = torch.aten.int_repr %5656 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],si8>
%5658 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5659 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5660 = torch.aten.item %5658 : !torch.vtensor<[],f32> -> !torch.float
%5661 = torch.aten.item %5659 : !torch.vtensor<[],si8> -> !torch.int
%5662 = torch.aten._make_per_tensor_quantized_tensor %5657, %5660, %5661 : !torch.vtensor<[1,64,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,40,40],!torch.qint8>
%5663 = torch.aten.dequantize.self %5662 : !torch.vtensor<[1,64,40,40],!torch.qint8> -> !torch.vtensor<[1,64,40,40],f32>
%5664 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%5665 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%5666 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1849 = torch.constant.int 0
%int0_1850 = torch.constant.int 0
%int0_1851 = torch.constant.int 0
%5667 = torch.aten.select.int %5666, %int0_1849, %int0_1851 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5668 = torch.aten.item %5667 : !torch.vtensor<[1],si64> -> !torch.int
%5669 = torch.aten.lt.int %5668, %int0_1849 : !torch.int, !torch.int -> !torch.bool
%5670 = torch.aten.Int.bool %5669 : !torch.bool -> !torch.int
%5671 = torch.aten.mul.int %5670, %int0_1850 : !torch.int, !torch.int -> !torch.int
%5672 = torch.aten.add.int %5668, %5671 : !torch.int, !torch.int -> !torch.int
%5673 = torch.prim.ListConstruct %5672 : (!torch.int) -> !torch.list<int>
%false_1852 = torch.constant.bool false
%none_1853 = torch.constant.none
%5674 = torch.aten.tensor %5673, %none_1853, %none_1853, %false_1852 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1854, %indices_1855 = torch.aten.sort %5674, %int0_1849, %false_1852 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1856 = torch.constant.int 0
%5675 = torch.aten.select.int %values_1854, %int0_1849, %int0_1856 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5676 = torch.aten.item %5675 : !torch.vtensor<[1],si64> -> !torch.int
%5677 = torch.aten.unsqueeze %5664, %5676 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5678 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1857 = torch.constant.int 0
%int0_1858 = torch.constant.int 0
%int0_1859 = torch.constant.int 0
%5679 = torch.aten.select.int %5678, %int0_1857, %int0_1859 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5680 = torch.aten.item %5679 : !torch.vtensor<[1],si64> -> !torch.int
%5681 = torch.aten.lt.int %5680, %int0_1857 : !torch.int, !torch.int -> !torch.bool
%5682 = torch.aten.Int.bool %5681 : !torch.bool -> !torch.int
%5683 = torch.aten.mul.int %5682, %int0_1858 : !torch.int, !torch.int -> !torch.int
%5684 = torch.aten.add.int %5680, %5683 : !torch.int, !torch.int -> !torch.int
%5685 = torch.prim.ListConstruct %5684 : (!torch.int) -> !torch.list<int>
%false_1860 = torch.constant.bool false
%none_1861 = torch.constant.none
%5686 = torch.aten.tensor %5685, %none_1861, %none_1861, %false_1860 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1862, %indices_1863 = torch.aten.sort %5686, %int0_1857, %false_1860 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1864 = torch.constant.int 0
%5687 = torch.aten.select.int %values_1862, %int0_1857, %int0_1864 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5688 = torch.aten.item %5687 : !torch.vtensor<[1],si64> -> !torch.int
%5689 = torch.aten.unsqueeze %5665, %5688 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5690 = torch.prim.ListConstruct %5677, %5689 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1865 = torch.constant.int 0
%5691 = torch.aten.cat %5690, %int0_1865 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%5692 = torch.aten._shape_as_tensor %5663 : !torch.vtensor<[1,64,40,40],f32> -> !torch.vtensor<[4],si64>
%5693 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5694 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5695 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1866 = torch.constant.none
%int1_1867 = torch.constant.int 1
%5696 = torch.prim.ListConstruct %int1_1867 : (!torch.int) -> !torch.list<int>
%5697 = torch.aten.ones %5696, %none_1866, %none_1866, %none_1866, %none_1866 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1868 = torch.constant.int 0
%int0_1869 = torch.constant.int 0
%5698 = torch.prim.NumToTensor.Scalar %int0_1869 : !torch.int -> !torch.vtensor<[1],si64>
%5699 = torch.aten.index_select %5694, %int0_1868, %5698 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5700 = torch.aten.item %5699 : !torch.vtensor<[1],si64> -> !torch.int
%5701 = torch.aten.index_select %5695, %int0_1868, %5698 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5702 = torch.aten.item %5701 : !torch.vtensor<[1],si64> -> !torch.int
%5703 = torch.aten.index_select %5693, %int0_1868, %5698 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5704 = torch.aten.item %5703 : !torch.vtensor<[1],si64> -> !torch.int
%5705 = torch.aten.index_select %5697, %int0_1868, %5698 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5706 = torch.aten.item %5705 : !torch.vtensor<[1],si64> -> !torch.int
%5707 = torch.aten.slice.Tensor %5692, %5704, %5700, %5702, %5706 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1870 = torch.constant.int 4
%none_1871 = torch.constant.none
%false_1872 = torch.constant.bool false
%5708 = torch.aten.to.dtype %5691, %int4_1870, %false_1872, %false_1872, %none_1871 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%5709 = torch.prim.ListConstruct %5707, %5708 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1873 = torch.constant.int 0
%5710 = torch.aten.cat %5709, %int0_1873 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%5711 = torch.operator "onnx.Resize"(%5663, %none, %none, %5710) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%5712 = torch.prim.ListConstruct %5711, %5192 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,80,80],f32>) -> !torch.list<vtensor>
%int1_1874 = torch.constant.int 1
%5713 = torch.aten.cat %5712, %int1_1874 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%5714 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5715 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1875 = torch.constant.int 12
%5716 = torch.aten.item %5714 : !torch.vtensor<[],f32> -> !torch.float
%5717 = torch.aten.item %5715 : !torch.vtensor<[],si8> -> !torch.int
%5718 = torch.aten.quantize_per_tensor %5713, %5716, %5717, %int12_1875 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%5719 = torch.aten.int_repr %5718 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%5720 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5721 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5722 = torch.aten.item %5720 : !torch.vtensor<[],f32> -> !torch.float
%5723 = torch.aten.item %5721 : !torch.vtensor<[],si8> -> !torch.int
%5724 = torch.aten._make_per_tensor_quantized_tensor %5719, %5722, %5723 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%5725 = torch.aten.dequantize.self %5724 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%5726 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5727 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1876 = torch.constant.int 12
%5728 = torch.aten.item %5726 : !torch.vtensor<[],f32> -> !torch.float
%5729 = torch.aten.item %5727 : !torch.vtensor<[],si8> -> !torch.int
%5730 = torch.aten.quantize_per_tensor %170, %5728, %5729, %int12_1876 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%5731 = torch.aten.int_repr %5730 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%5732 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5733 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5734 = torch.aten.item %5732 : !torch.vtensor<[],f32> -> !torch.float
%5735 = torch.aten.item %5733 : !torch.vtensor<[],si8> -> !torch.int
%5736 = torch.aten._make_per_tensor_quantized_tensor %5731, %5734, %5735 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%5737 = torch.aten.dequantize.self %5736 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%5738 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5739 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1877 = torch.constant.int 12
%5740 = torch.aten.item %5738 : !torch.vtensor<[],f32> -> !torch.float
%5741 = torch.aten.item %5739 : !torch.vtensor<[],si8> -> !torch.int
%5742 = torch.aten.quantize_per_tensor %171, %5740, %5741, %int12_1877 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%5743 = torch.aten.int_repr %5742 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%5744 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5745 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5746 = torch.aten.item %5744 : !torch.vtensor<[],f32> -> !torch.float
%5747 = torch.aten.item %5745 : !torch.vtensor<[],si8> -> !torch.int
%5748 = torch.aten._make_per_tensor_quantized_tensor %5743, %5746, %5747 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%5749 = torch.aten.dequantize.self %5748 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_1878 = torch.constant.int 1
%int1_1879 = torch.constant.int 1
%int1_1880 = torch.constant.int 1
%int1_1881 = torch.constant.int 1
%int1_1882 = torch.constant.int 1
%int1_1883 = torch.constant.int 1
%int0_1884 = torch.constant.int 0
%5750 = torch.prim.ListConstruct %int1_1878, %int1_1879 : (!torch.int, !torch.int) -> !torch.list<int>
%5751 = torch.prim.ListConstruct %int1_1880, %int1_1881 : (!torch.int, !torch.int) -> !torch.list<int>
%5752 = torch.prim.ListConstruct %int1_1882, %int1_1883 : (!torch.int, !torch.int) -> !torch.list<int>
%5753 = torch.prim.ListConstruct %int0_1884, %int0_1884 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1885 = torch.constant.bool false
%int1_1886 = torch.constant.int 1
%5754 = torch.aten.convolution %5725, %5737, %5749, %5752, %5750, %5751, %false_1885, %5753, %int1_1886 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,80,80],f32>
%5755 = torch.aten.relu %5754 : !torch.vtensor<[1,128,80,80],f32> -> !torch.vtensor<[1,128,80,80],f32>
%5756 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5757 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1887 = torch.constant.int 12
%5758 = torch.aten.item %5756 : !torch.vtensor<[],f32> -> !torch.float
%5759 = torch.aten.item %5757 : !torch.vtensor<[],si8> -> !torch.int
%5760 = torch.aten.quantize_per_tensor %5755, %5758, %5759, %int12_1887 : !torch.vtensor<[1,128,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5761 = torch.aten.int_repr %5760 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],si8>
%5762 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5763 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5764 = torch.aten.item %5762 : !torch.vtensor<[],f32> -> !torch.float
%5765 = torch.aten.item %5763 : !torch.vtensor<[],si8> -> !torch.int
%5766 = torch.aten._make_per_tensor_quantized_tensor %5761, %5764, %5765 : !torch.vtensor<[1,128,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5767 = torch.aten.dequantize.self %5766 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],f32>
%int1_1888 = torch.constant.int 1
%5768 = torch.aten.add.Tensor %5767, %5150, %int1_1888 : !torch.vtensor<[1,128,80,80],f32>, !torch.vtensor<[1,128,80,80],f32>, !torch.int -> !torch.vtensor<[1,128,80,80],f32>
%5769 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1889 = torch.constant.int 12
%5771 = torch.aten.item %5769 : !torch.vtensor<[],f32> -> !torch.float
%5772 = torch.aten.item %5770 : !torch.vtensor<[],si8> -> !torch.int
%5773 = torch.aten.quantize_per_tensor %5768, %5771, %5772, %int12_1889 : !torch.vtensor<[1,128,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5774 = torch.aten.int_repr %5773 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],si8>
%5775 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5776 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5777 = torch.aten.item %5775 : !torch.vtensor<[],f32> -> !torch.float
%5778 = torch.aten.item %5776 : !torch.vtensor<[],si8> -> !torch.int
%5779 = torch.aten._make_per_tensor_quantized_tensor %5774, %5777, %5778 : !torch.vtensor<[1,128,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,80,80],!torch.qint8>
%5780 = torch.aten.dequantize.self %5779 : !torch.vtensor<[1,128,80,80],!torch.qint8> -> !torch.vtensor<[1,128,80,80],f32>
%5781 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%5782 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%5783 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1890 = torch.constant.int 0
%int0_1891 = torch.constant.int 0
%int0_1892 = torch.constant.int 0
%5784 = torch.aten.select.int %5783, %int0_1890, %int0_1892 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5785 = torch.aten.item %5784 : !torch.vtensor<[1],si64> -> !torch.int
%5786 = torch.aten.lt.int %5785, %int0_1890 : !torch.int, !torch.int -> !torch.bool
%5787 = torch.aten.Int.bool %5786 : !torch.bool -> !torch.int
%5788 = torch.aten.mul.int %5787, %int0_1891 : !torch.int, !torch.int -> !torch.int
%5789 = torch.aten.add.int %5785, %5788 : !torch.int, !torch.int -> !torch.int
%5790 = torch.prim.ListConstruct %5789 : (!torch.int) -> !torch.list<int>
%false_1893 = torch.constant.bool false
%none_1894 = torch.constant.none
%5791 = torch.aten.tensor %5790, %none_1894, %none_1894, %false_1893 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1895, %indices_1896 = torch.aten.sort %5791, %int0_1890, %false_1893 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1897 = torch.constant.int 0
%5792 = torch.aten.select.int %values_1895, %int0_1890, %int0_1897 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5793 = torch.aten.item %5792 : !torch.vtensor<[1],si64> -> !torch.int
%5794 = torch.aten.unsqueeze %5781, %5793 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5795 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1898 = torch.constant.int 0
%int0_1899 = torch.constant.int 0
%int0_1900 = torch.constant.int 0
%5796 = torch.aten.select.int %5795, %int0_1898, %int0_1900 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5797 = torch.aten.item %5796 : !torch.vtensor<[1],si64> -> !torch.int
%5798 = torch.aten.lt.int %5797, %int0_1898 : !torch.int, !torch.int -> !torch.bool
%5799 = torch.aten.Int.bool %5798 : !torch.bool -> !torch.int
%5800 = torch.aten.mul.int %5799, %int0_1899 : !torch.int, !torch.int -> !torch.int
%5801 = torch.aten.add.int %5797, %5800 : !torch.int, !torch.int -> !torch.int
%5802 = torch.prim.ListConstruct %5801 : (!torch.int) -> !torch.list<int>
%false_1901 = torch.constant.bool false
%none_1902 = torch.constant.none
%5803 = torch.aten.tensor %5802, %none_1902, %none_1902, %false_1901 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1903, %indices_1904 = torch.aten.sort %5803, %int0_1898, %false_1901 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1905 = torch.constant.int 0
%5804 = torch.aten.select.int %values_1903, %int0_1898, %int0_1905 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%5805 = torch.aten.item %5804 : !torch.vtensor<[1],si64> -> !torch.int
%5806 = torch.aten.unsqueeze %5782, %5805 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%5807 = torch.prim.ListConstruct %5794, %5806 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1906 = torch.constant.int 0
%5808 = torch.aten.cat %5807, %int0_1906 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%5809 = torch.aten._shape_as_tensor %5780 : !torch.vtensor<[1,128,80,80],f32> -> !torch.vtensor<[4],si64>
%5810 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5811 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%5812 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1907 = torch.constant.none
%int1_1908 = torch.constant.int 1
%5813 = torch.prim.ListConstruct %int1_1908 : (!torch.int) -> !torch.list<int>
%5814 = torch.aten.ones %5813, %none_1907, %none_1907, %none_1907, %none_1907 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1909 = torch.constant.int 0
%int0_1910 = torch.constant.int 0
%5815 = torch.prim.NumToTensor.Scalar %int0_1910 : !torch.int -> !torch.vtensor<[1],si64>
%5816 = torch.aten.index_select %5811, %int0_1909, %5815 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5817 = torch.aten.item %5816 : !torch.vtensor<[1],si64> -> !torch.int
%5818 = torch.aten.index_select %5812, %int0_1909, %5815 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5819 = torch.aten.item %5818 : !torch.vtensor<[1],si64> -> !torch.int
%5820 = torch.aten.index_select %5810, %int0_1909, %5815 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5821 = torch.aten.item %5820 : !torch.vtensor<[1],si64> -> !torch.int
%5822 = torch.aten.index_select %5814, %int0_1909, %5815 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%5823 = torch.aten.item %5822 : !torch.vtensor<[1],si64> -> !torch.int
%5824 = torch.aten.slice.Tensor %5809, %5821, %5817, %5819, %5823 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1911 = torch.constant.int 4
%none_1912 = torch.constant.none
%false_1913 = torch.constant.bool false
%5825 = torch.aten.to.dtype %5808, %int4_1911, %false_1913, %false_1913, %none_1912 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%5826 = torch.prim.ListConstruct %5824, %5825 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1914 = torch.constant.int 0
%5827 = torch.aten.cat %5826, %int0_1914 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%5828 = torch.operator "onnx.Resize"(%5780, %none, %none, %5827) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,80,80],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%5829 = torch.prim.ListConstruct %5828, %2099 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,128,160,160],f32>) -> !torch.list<vtensor>
%int1_1915 = torch.constant.int 1
%5830 = torch.aten.cat %5829, %int1_1915 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,160,160],f32>
%5831 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1916 = torch.constant.int 12
%5833 = torch.aten.item %5831 : !torch.vtensor<[],f32> -> !torch.float
%5834 = torch.aten.item %5832 : !torch.vtensor<[],si8> -> !torch.int
%5835 = torch.aten.quantize_per_tensor %5830, %5833, %5834, %int12_1916 : !torch.vtensor<[1,?,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%5836 = torch.aten.int_repr %5835 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],si8>
%5837 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5839 = torch.aten.item %5837 : !torch.vtensor<[],f32> -> !torch.float
%5840 = torch.aten.item %5838 : !torch.vtensor<[],si8> -> !torch.int
%5841 = torch.aten._make_per_tensor_quantized_tensor %5836, %5839, %5840 : !torch.vtensor<[1,?,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%5842 = torch.aten.dequantize.self %5841 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],f32>
%5843 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5844 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1917 = torch.constant.int 12
%5845 = torch.aten.item %5843 : !torch.vtensor<[],f32> -> !torch.float
%5846 = torch.aten.item %5844 : !torch.vtensor<[],si8> -> !torch.int
%5847 = torch.aten.quantize_per_tensor %172, %5845, %5846, %int12_1917 : !torch.vtensor<[64,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,3,3],!torch.qint8>
%5848 = torch.aten.int_repr %5847 : !torch.vtensor<[64,256,3,3],!torch.qint8> -> !torch.vtensor<[64,256,3,3],si8>
%5849 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%5850 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5851 = torch.aten.item %5849 : !torch.vtensor<[],f32> -> !torch.float
%5852 = torch.aten.item %5850 : !torch.vtensor<[],si8> -> !torch.int
%5853 = torch.aten._make_per_tensor_quantized_tensor %5848, %5851, %5852 : !torch.vtensor<[64,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,3,3],!torch.qint8>
%5854 = torch.aten.dequantize.self %5853 : !torch.vtensor<[64,256,3,3],!torch.qint8> -> !torch.vtensor<[64,256,3,3],f32>
%5855 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5856 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1918 = torch.constant.int 12
%5857 = torch.aten.item %5855 : !torch.vtensor<[],f32> -> !torch.float
%5858 = torch.aten.item %5856 : !torch.vtensor<[],si8> -> !torch.int
%5859 = torch.aten.quantize_per_tensor %173, %5857, %5858, %int12_1918 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5860 = torch.aten.int_repr %5859 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%5861 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5862 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5863 = torch.aten.item %5861 : !torch.vtensor<[],f32> -> !torch.float
%5864 = torch.aten.item %5862 : !torch.vtensor<[],si8> -> !torch.int
%5865 = torch.aten._make_per_tensor_quantized_tensor %5860, %5863, %5864 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%5866 = torch.aten.dequantize.self %5865 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_1919 = torch.constant.int 1
%int1_1920 = torch.constant.int 1
%int1_1921 = torch.constant.int 1
%int1_1922 = torch.constant.int 1
%int1_1923 = torch.constant.int 1
%int1_1924 = torch.constant.int 1
%int0_1925 = torch.constant.int 0
%5867 = torch.prim.ListConstruct %int1_1919, %int1_1920 : (!torch.int, !torch.int) -> !torch.list<int>
%5868 = torch.prim.ListConstruct %int1_1921, %int1_1922 : (!torch.int, !torch.int) -> !torch.list<int>
%5869 = torch.prim.ListConstruct %int1_1923, %int1_1924 : (!torch.int, !torch.int) -> !torch.list<int>
%5870 = torch.prim.ListConstruct %int0_1925, %int0_1925 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1926 = torch.constant.bool false
%int1_1927 = torch.constant.int 1
%5871 = torch.aten.convolution %5842, %5854, %5866, %5869, %5867, %5868, %false_1926, %5870, %int1_1927 : !torch.vtensor<[1,?,160,160],f32>, !torch.vtensor<[64,256,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,160,160],f32>
%5872 = torch.aten.relu %5871 : !torch.vtensor<[1,64,160,160],f32> -> !torch.vtensor<[1,64,160,160],f32>
%5873 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5874 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1928 = torch.constant.int 12
%5875 = torch.aten.item %5873 : !torch.vtensor<[],f32> -> !torch.float
%5876 = torch.aten.item %5874 : !torch.vtensor<[],si8> -> !torch.int
%5877 = torch.aten.quantize_per_tensor %5872, %5875, %5876, %int12_1928 : !torch.vtensor<[1,64,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%5878 = torch.aten.int_repr %5877 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],si8>
%5879 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5880 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5881 = torch.aten.item %5879 : !torch.vtensor<[],f32> -> !torch.float
%5882 = torch.aten.item %5880 : !torch.vtensor<[],si8> -> !torch.int
%5883 = torch.aten._make_per_tensor_quantized_tensor %5878, %5881, %5882 : !torch.vtensor<[1,64,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%5884 = torch.aten.dequantize.self %5883 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],f32>
%5885 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5886 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1929 = torch.constant.int 12
%5887 = torch.aten.item %5885 : !torch.vtensor<[],f32> -> !torch.float
%5888 = torch.aten.item %5886 : !torch.vtensor<[],si8> -> !torch.int
%5889 = torch.aten.quantize_per_tensor %174, %5887, %5888, %int12_1929 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%5890 = torch.aten.int_repr %5889 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%5891 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5892 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5893 = torch.aten.item %5891 : !torch.vtensor<[],f32> -> !torch.float
%5894 = torch.aten.item %5892 : !torch.vtensor<[],si8> -> !torch.int
%5895 = torch.aten._make_per_tensor_quantized_tensor %5890, %5893, %5894 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%5896 = torch.aten.dequantize.self %5895 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%5897 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5898 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1930 = torch.constant.int 12
%5899 = torch.aten.item %5897 : !torch.vtensor<[],f32> -> !torch.float
%5900 = torch.aten.item %5898 : !torch.vtensor<[],si8> -> !torch.int
%5901 = torch.aten.quantize_per_tensor %175, %5899, %5900, %int12_1930 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%5902 = torch.aten.int_repr %5901 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%5903 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5904 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5905 = torch.aten.item %5903 : !torch.vtensor<[],f32> -> !torch.float
%5906 = torch.aten.item %5904 : !torch.vtensor<[],si8> -> !torch.int
%5907 = torch.aten._make_per_tensor_quantized_tensor %5902, %5905, %5906 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%5908 = torch.aten.dequantize.self %5907 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_1931 = torch.constant.int 1
%int1_1932 = torch.constant.int 1
%int1_1933 = torch.constant.int 1
%int1_1934 = torch.constant.int 1
%int1_1935 = torch.constant.int 1
%int1_1936 = torch.constant.int 1
%int0_1937 = torch.constant.int 0
%5909 = torch.prim.ListConstruct %int1_1931, %int1_1932 : (!torch.int, !torch.int) -> !torch.list<int>
%5910 = torch.prim.ListConstruct %int1_1933, %int1_1934 : (!torch.int, !torch.int) -> !torch.list<int>
%5911 = torch.prim.ListConstruct %int1_1935, %int1_1936 : (!torch.int, !torch.int) -> !torch.list<int>
%5912 = torch.prim.ListConstruct %int0_1937, %int0_1937 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1938 = torch.constant.bool false
%int1_1939 = torch.constant.int 1
%5913 = torch.aten.convolution %5884, %5896, %5908, %5911, %5909, %5910, %false_1938, %5912, %int1_1939 : !torch.vtensor<[1,64,160,160],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,160,160],f32>
%5914 = torch.aten.relu %5913 : !torch.vtensor<[1,32,160,160],f32> -> !torch.vtensor<[1,32,160,160],f32>
%5915 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5916 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1940 = torch.constant.int 12
%5917 = torch.aten.item %5915 : !torch.vtensor<[],f32> -> !torch.float
%5918 = torch.aten.item %5916 : !torch.vtensor<[],si8> -> !torch.int
%5919 = torch.aten.quantize_per_tensor %5914, %5917, %5918, %int12_1940 : !torch.vtensor<[1,32,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%5920 = torch.aten.int_repr %5919 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],si8>
%5921 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5922 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5923 = torch.aten.item %5921 : !torch.vtensor<[],f32> -> !torch.float
%5924 = torch.aten.item %5922 : !torch.vtensor<[],si8> -> !torch.int
%5925 = torch.aten._make_per_tensor_quantized_tensor %5920, %5923, %5924 : !torch.vtensor<[1,32,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,160,160],!torch.qint8>
%5926 = torch.aten.dequantize.self %5925 : !torch.vtensor<[1,32,160,160],!torch.qint8> -> !torch.vtensor<[1,32,160,160],f32>
%int2_1941 = torch.constant.int 2
%int2_1942 = torch.constant.int 2
%5927 = torch.prim.ListConstruct %int2_1941, %int2_1942 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1943 = torch.constant.int 0
%int0_1944 = torch.constant.int 0
%5928 = torch.prim.ListConstruct %int0_1943, %int0_1944 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1945 = torch.constant.int 2
%int2_1946 = torch.constant.int 2
%5929 = torch.prim.ListConstruct %int2_1945, %int2_1946 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1947 = torch.constant.int 1
%int1_1948 = torch.constant.int 1
%5930 = torch.prim.ListConstruct %int1_1947, %int1_1948 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1949 = torch.constant.bool true
%5931 = torch.aten.max_pool2d %5926, %5927, %5929, %5928, %5930, %true_1949 : !torch.vtensor<[1,32,160,160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,80,80],f32>
%5932 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5933 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1950 = torch.constant.int 12
%5934 = torch.aten.item %5932 : !torch.vtensor<[],f32> -> !torch.float
%5935 = torch.aten.item %5933 : !torch.vtensor<[],si8> -> !torch.int
%5936 = torch.aten.quantize_per_tensor %5931, %5934, %5935, %int12_1950 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%5937 = torch.aten.int_repr %5936 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%5938 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5939 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5940 = torch.aten.item %5938 : !torch.vtensor<[],f32> -> !torch.float
%5941 = torch.aten.item %5939 : !torch.vtensor<[],si8> -> !torch.int
%5942 = torch.aten._make_per_tensor_quantized_tensor %5937, %5940, %5941 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%5943 = torch.aten.dequantize.self %5942 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%5944 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1951 = torch.constant.int 12
%5946 = torch.aten.item %5944 : !torch.vtensor<[],f32> -> !torch.float
%5947 = torch.aten.item %5945 : !torch.vtensor<[],si8> -> !torch.int
%5948 = torch.aten.quantize_per_tensor %176, %5946, %5947, %int12_1951 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%5949 = torch.aten.int_repr %5948 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%5950 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%5951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5952 = torch.aten.item %5950 : !torch.vtensor<[],f32> -> !torch.float
%5953 = torch.aten.item %5951 : !torch.vtensor<[],si8> -> !torch.int
%5954 = torch.aten._make_per_tensor_quantized_tensor %5949, %5952, %5953 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%5955 = torch.aten.dequantize.self %5954 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%5956 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1952 = torch.constant.int 12
%5958 = torch.aten.item %5956 : !torch.vtensor<[],f32> -> !torch.float
%5959 = torch.aten.item %5957 : !torch.vtensor<[],si8> -> !torch.int
%5960 = torch.aten.quantize_per_tensor %177, %5958, %5959, %int12_1952 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%5961 = torch.aten.int_repr %5960 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%5962 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5963 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5964 = torch.aten.item %5962 : !torch.vtensor<[],f32> -> !torch.float
%5965 = torch.aten.item %5963 : !torch.vtensor<[],si8> -> !torch.int
%5966 = torch.aten._make_per_tensor_quantized_tensor %5961, %5964, %5965 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%5967 = torch.aten.dequantize.self %5966 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_1953 = torch.constant.int 1
%int1_1954 = torch.constant.int 1
%int1_1955 = torch.constant.int 1
%int1_1956 = torch.constant.int 1
%int1_1957 = torch.constant.int 1
%int1_1958 = torch.constant.int 1
%int0_1959 = torch.constant.int 0
%5968 = torch.prim.ListConstruct %int1_1953, %int1_1954 : (!torch.int, !torch.int) -> !torch.list<int>
%5969 = torch.prim.ListConstruct %int1_1955, %int1_1956 : (!torch.int, !torch.int) -> !torch.list<int>
%5970 = torch.prim.ListConstruct %int1_1957, %int1_1958 : (!torch.int, !torch.int) -> !torch.list<int>
%5971 = torch.prim.ListConstruct %int0_1959, %int0_1959 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1960 = torch.constant.bool false
%int1_1961 = torch.constant.int 1
%5972 = torch.aten.convolution %5943, %5955, %5967, %5970, %5968, %5969, %false_1960, %5971, %int1_1961 : !torch.vtensor<[1,32,80,80],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%5973 = torch.aten.relu %5972 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%5974 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5975 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1962 = torch.constant.int 12
%5976 = torch.aten.item %5974 : !torch.vtensor<[],f32> -> !torch.float
%5977 = torch.aten.item %5975 : !torch.vtensor<[],si8> -> !torch.int
%5978 = torch.aten.quantize_per_tensor %5973, %5976, %5977, %int12_1962 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%5979 = torch.aten.int_repr %5978 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%5980 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5981 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5982 = torch.aten.item %5980 : !torch.vtensor<[],f32> -> !torch.float
%5983 = torch.aten.item %5981 : !torch.vtensor<[],si8> -> !torch.int
%5984 = torch.aten._make_per_tensor_quantized_tensor %5979, %5982, %5983 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%5985 = torch.aten.dequantize.self %5984 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%int2_1963 = torch.constant.int 2
%int2_1964 = torch.constant.int 2
%5986 = torch.prim.ListConstruct %int2_1963, %int2_1964 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1965 = torch.constant.int 0
%int0_1966 = torch.constant.int 0
%5987 = torch.prim.ListConstruct %int0_1965, %int0_1966 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1967 = torch.constant.int 2
%int2_1968 = torch.constant.int 2
%5988 = torch.prim.ListConstruct %int2_1967, %int2_1968 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1969 = torch.constant.int 1
%int1_1970 = torch.constant.int 1
%5989 = torch.prim.ListConstruct %int1_1969, %int1_1970 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1971 = torch.constant.bool true
%5990 = torch.aten.max_pool2d %5985, %5986, %5988, %5987, %5989, %true_1971 : !torch.vtensor<[1,32,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,40,40],f32>
%5991 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5992 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1972 = torch.constant.int 12
%5993 = torch.aten.item %5991 : !torch.vtensor<[],f32> -> !torch.float
%5994 = torch.aten.item %5992 : !torch.vtensor<[],si8> -> !torch.int
%5995 = torch.aten.quantize_per_tensor %5990, %5993, %5994, %int12_1972 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%5996 = torch.aten.int_repr %5995 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%5997 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%5998 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%5999 = torch.aten.item %5997 : !torch.vtensor<[],f32> -> !torch.float
%6000 = torch.aten.item %5998 : !torch.vtensor<[],si8> -> !torch.int
%6001 = torch.aten._make_per_tensor_quantized_tensor %5996, %5999, %6000 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%6002 = torch.aten.dequantize.self %6001 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%6003 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6004 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1973 = torch.constant.int 12
%6005 = torch.aten.item %6003 : !torch.vtensor<[],f32> -> !torch.float
%6006 = torch.aten.item %6004 : !torch.vtensor<[],si8> -> !torch.int
%6007 = torch.aten.quantize_per_tensor %178, %6005, %6006, %int12_1973 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6008 = torch.aten.int_repr %6007 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%6009 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6010 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6011 = torch.aten.item %6009 : !torch.vtensor<[],f32> -> !torch.float
%6012 = torch.aten.item %6010 : !torch.vtensor<[],si8> -> !torch.int
%6013 = torch.aten._make_per_tensor_quantized_tensor %6008, %6011, %6012 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6014 = torch.aten.dequantize.self %6013 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%6015 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6016 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1974 = torch.constant.int 12
%6017 = torch.aten.item %6015 : !torch.vtensor<[],f32> -> !torch.float
%6018 = torch.aten.item %6016 : !torch.vtensor<[],si8> -> !torch.int
%6019 = torch.aten.quantize_per_tensor %179, %6017, %6018, %int12_1974 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6020 = torch.aten.int_repr %6019 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6021 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6022 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6023 = torch.aten.item %6021 : !torch.vtensor<[],f32> -> !torch.float
%6024 = torch.aten.item %6022 : !torch.vtensor<[],si8> -> !torch.int
%6025 = torch.aten._make_per_tensor_quantized_tensor %6020, %6023, %6024 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6026 = torch.aten.dequantize.self %6025 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_1975 = torch.constant.int 1
%int1_1976 = torch.constant.int 1
%int1_1977 = torch.constant.int 1
%int1_1978 = torch.constant.int 1
%int1_1979 = torch.constant.int 1
%int1_1980 = torch.constant.int 1
%int0_1981 = torch.constant.int 0
%6027 = torch.prim.ListConstruct %int1_1975, %int1_1976 : (!torch.int, !torch.int) -> !torch.list<int>
%6028 = torch.prim.ListConstruct %int1_1977, %int1_1978 : (!torch.int, !torch.int) -> !torch.list<int>
%6029 = torch.prim.ListConstruct %int1_1979, %int1_1980 : (!torch.int, !torch.int) -> !torch.list<int>
%6030 = torch.prim.ListConstruct %int0_1981, %int0_1981 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1982 = torch.constant.bool false
%int1_1983 = torch.constant.int 1
%6031 = torch.aten.convolution %6002, %6014, %6026, %6029, %6027, %6028, %false_1982, %6030, %int1_1983 : !torch.vtensor<[1,32,40,40],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%6032 = torch.aten.relu %6031 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%6033 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6034 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1984 = torch.constant.int 12
%6035 = torch.aten.item %6033 : !torch.vtensor<[],f32> -> !torch.float
%6036 = torch.aten.item %6034 : !torch.vtensor<[],si8> -> !torch.int
%6037 = torch.aten.quantize_per_tensor %6032, %6035, %6036, %int12_1984 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%6038 = torch.aten.int_repr %6037 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%6039 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6040 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6041 = torch.aten.item %6039 : !torch.vtensor<[],f32> -> !torch.float
%6042 = torch.aten.item %6040 : !torch.vtensor<[],si8> -> !torch.int
%6043 = torch.aten._make_per_tensor_quantized_tensor %6038, %6041, %6042 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%6044 = torch.aten.dequantize.self %6043 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%int2_1985 = torch.constant.int 2
%int2_1986 = torch.constant.int 2
%6045 = torch.prim.ListConstruct %int2_1985, %int2_1986 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1987 = torch.constant.int 0
%int0_1988 = torch.constant.int 0
%6046 = torch.prim.ListConstruct %int0_1987, %int0_1988 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_1989 = torch.constant.int 2
%int2_1990 = torch.constant.int 2
%6047 = torch.prim.ListConstruct %int2_1989, %int2_1990 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1991 = torch.constant.int 1
%int1_1992 = torch.constant.int 1
%6048 = torch.prim.ListConstruct %int1_1991, %int1_1992 : (!torch.int, !torch.int) -> !torch.list<int>
%true_1993 = torch.constant.bool true
%6049 = torch.aten.max_pool2d %6044, %6045, %6047, %6046, %6048, %true_1993 : !torch.vtensor<[1,32,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,20,20],f32>
%6050 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6051 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1994 = torch.constant.int 12
%6052 = torch.aten.item %6050 : !torch.vtensor<[],f32> -> !torch.float
%6053 = torch.aten.item %6051 : !torch.vtensor<[],si8> -> !torch.int
%6054 = torch.aten.quantize_per_tensor %6049, %6052, %6053, %int12_1994 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6055 = torch.aten.int_repr %6054 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%6056 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6057 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6058 = torch.aten.item %6056 : !torch.vtensor<[],f32> -> !torch.float
%6059 = torch.aten.item %6057 : !torch.vtensor<[],si8> -> !torch.int
%6060 = torch.aten._make_per_tensor_quantized_tensor %6055, %6058, %6059 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6061 = torch.aten.dequantize.self %6060 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%6062 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6063 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1995 = torch.constant.int 12
%6064 = torch.aten.item %6062 : !torch.vtensor<[],f32> -> !torch.float
%6065 = torch.aten.item %6063 : !torch.vtensor<[],si8> -> !torch.int
%6066 = torch.aten.quantize_per_tensor %180, %6064, %6065, %int12_1995 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6067 = torch.aten.int_repr %6066 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%6068 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6070 = torch.aten.item %6068 : !torch.vtensor<[],f32> -> !torch.float
%6071 = torch.aten.item %6069 : !torch.vtensor<[],si8> -> !torch.int
%6072 = torch.aten._make_per_tensor_quantized_tensor %6067, %6070, %6071 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6073 = torch.aten.dequantize.self %6072 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%6074 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1996 = torch.constant.int 12
%6076 = torch.aten.item %6074 : !torch.vtensor<[],f32> -> !torch.float
%6077 = torch.aten.item %6075 : !torch.vtensor<[],si8> -> !torch.int
%6078 = torch.aten.quantize_per_tensor %181, %6076, %6077, %int12_1996 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6079 = torch.aten.int_repr %6078 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6080 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6082 = torch.aten.item %6080 : !torch.vtensor<[],f32> -> !torch.float
%6083 = torch.aten.item %6081 : !torch.vtensor<[],si8> -> !torch.int
%6084 = torch.aten._make_per_tensor_quantized_tensor %6079, %6082, %6083 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6085 = torch.aten.dequantize.self %6084 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_1997 = torch.constant.int 1
%int1_1998 = torch.constant.int 1
%int1_1999 = torch.constant.int 1
%int1_2000 = torch.constant.int 1
%int1_2001 = torch.constant.int 1
%int1_2002 = torch.constant.int 1
%int0_2003 = torch.constant.int 0
%6086 = torch.prim.ListConstruct %int1_1997, %int1_1998 : (!torch.int, !torch.int) -> !torch.list<int>
%6087 = torch.prim.ListConstruct %int1_1999, %int1_2000 : (!torch.int, !torch.int) -> !torch.list<int>
%6088 = torch.prim.ListConstruct %int1_2001, %int1_2002 : (!torch.int, !torch.int) -> !torch.list<int>
%6089 = torch.prim.ListConstruct %int0_2003, %int0_2003 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2004 = torch.constant.bool false
%int1_2005 = torch.constant.int 1
%6090 = torch.aten.convolution %6061, %6073, %6085, %6088, %6086, %6087, %false_2004, %6089, %int1_2005 : !torch.vtensor<[1,32,20,20],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%6091 = torch.aten.relu %6090 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%6092 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6093 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2006 = torch.constant.int 12
%6094 = torch.aten.item %6092 : !torch.vtensor<[],f32> -> !torch.float
%6095 = torch.aten.item %6093 : !torch.vtensor<[],si8> -> !torch.int
%6096 = torch.aten.quantize_per_tensor %6091, %6094, %6095, %int12_2006 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6097 = torch.aten.int_repr %6096 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%6098 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6099 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6100 = torch.aten.item %6098 : !torch.vtensor<[],f32> -> !torch.float
%6101 = torch.aten.item %6099 : !torch.vtensor<[],si8> -> !torch.int
%6102 = torch.aten._make_per_tensor_quantized_tensor %6097, %6100, %6101 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6103 = torch.aten.dequantize.self %6102 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%int2_2007 = torch.constant.int 2
%int2_2008 = torch.constant.int 2
%6104 = torch.prim.ListConstruct %int2_2007, %int2_2008 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2009 = torch.constant.int 0
%int0_2010 = torch.constant.int 0
%6105 = torch.prim.ListConstruct %int0_2009, %int0_2010 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2011 = torch.constant.int 2
%int2_2012 = torch.constant.int 2
%6106 = torch.prim.ListConstruct %int2_2011, %int2_2012 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2013 = torch.constant.int 1
%int1_2014 = torch.constant.int 1
%6107 = torch.prim.ListConstruct %int1_2013, %int1_2014 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2015 = torch.constant.bool true
%6108 = torch.aten.max_pool2d %6103, %6104, %6106, %6105, %6107, %true_2015 : !torch.vtensor<[1,32,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,10,10],f32>
%6109 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2016 = torch.constant.int 12
%6111 = torch.aten.item %6109 : !torch.vtensor<[],f32> -> !torch.float
%6112 = torch.aten.item %6110 : !torch.vtensor<[],si8> -> !torch.int
%6113 = torch.aten.quantize_per_tensor %6108, %6111, %6112, %int12_2016 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6114 = torch.aten.int_repr %6113 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%6115 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6117 = torch.aten.item %6115 : !torch.vtensor<[],f32> -> !torch.float
%6118 = torch.aten.item %6116 : !torch.vtensor<[],si8> -> !torch.int
%6119 = torch.aten._make_per_tensor_quantized_tensor %6114, %6117, %6118 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6120 = torch.aten.dequantize.self %6119 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%6121 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2017 = torch.constant.int 12
%6123 = torch.aten.item %6121 : !torch.vtensor<[],f32> -> !torch.float
%6124 = torch.aten.item %6122 : !torch.vtensor<[],si8> -> !torch.int
%6125 = torch.aten.quantize_per_tensor %182, %6123, %6124, %int12_2017 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6126 = torch.aten.int_repr %6125 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%6127 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6129 = torch.aten.item %6127 : !torch.vtensor<[],f32> -> !torch.float
%6130 = torch.aten.item %6128 : !torch.vtensor<[],si8> -> !torch.int
%6131 = torch.aten._make_per_tensor_quantized_tensor %6126, %6129, %6130 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6132 = torch.aten.dequantize.self %6131 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%6133 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6134 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2018 = torch.constant.int 12
%6135 = torch.aten.item %6133 : !torch.vtensor<[],f32> -> !torch.float
%6136 = torch.aten.item %6134 : !torch.vtensor<[],si8> -> !torch.int
%6137 = torch.aten.quantize_per_tensor %183, %6135, %6136, %int12_2018 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6138 = torch.aten.int_repr %6137 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6139 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6140 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6141 = torch.aten.item %6139 : !torch.vtensor<[],f32> -> !torch.float
%6142 = torch.aten.item %6140 : !torch.vtensor<[],si8> -> !torch.int
%6143 = torch.aten._make_per_tensor_quantized_tensor %6138, %6141, %6142 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6144 = torch.aten.dequantize.self %6143 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_2019 = torch.constant.int 1
%int1_2020 = torch.constant.int 1
%int1_2021 = torch.constant.int 1
%int1_2022 = torch.constant.int 1
%int1_2023 = torch.constant.int 1
%int1_2024 = torch.constant.int 1
%int0_2025 = torch.constant.int 0
%6145 = torch.prim.ListConstruct %int1_2019, %int1_2020 : (!torch.int, !torch.int) -> !torch.list<int>
%6146 = torch.prim.ListConstruct %int1_2021, %int1_2022 : (!torch.int, !torch.int) -> !torch.list<int>
%6147 = torch.prim.ListConstruct %int1_2023, %int1_2024 : (!torch.int, !torch.int) -> !torch.list<int>
%6148 = torch.prim.ListConstruct %int0_2025, %int0_2025 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2026 = torch.constant.bool false
%int1_2027 = torch.constant.int 1
%6149 = torch.aten.convolution %6120, %6132, %6144, %6147, %6145, %6146, %false_2026, %6148, %int1_2027 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%6150 = torch.aten.relu %6149 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%6151 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2028 = torch.constant.int 12
%6153 = torch.aten.item %6151 : !torch.vtensor<[],f32> -> !torch.float
%6154 = torch.aten.item %6152 : !torch.vtensor<[],si8> -> !torch.int
%6155 = torch.aten.quantize_per_tensor %6150, %6153, %6154, %int12_2028 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6156 = torch.aten.int_repr %6155 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%6157 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6159 = torch.aten.item %6157 : !torch.vtensor<[],f32> -> !torch.float
%6160 = torch.aten.item %6158 : !torch.vtensor<[],si8> -> !torch.int
%6161 = torch.aten._make_per_tensor_quantized_tensor %6156, %6159, %6160 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6162 = torch.aten.dequantize.self %6161 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%6163 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2029 = torch.constant.int 12
%6165 = torch.aten.item %6163 : !torch.vtensor<[],f32> -> !torch.float
%6166 = torch.aten.item %6164 : !torch.vtensor<[],si8> -> !torch.int
%6167 = torch.aten.quantize_per_tensor %184, %6165, %6166, %int12_2029 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6168 = torch.aten.int_repr %6167 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%6169 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6171 = torch.aten.item %6169 : !torch.vtensor<[],f32> -> !torch.float
%6172 = torch.aten.item %6170 : !torch.vtensor<[],si8> -> !torch.int
%6173 = torch.aten._make_per_tensor_quantized_tensor %6168, %6171, %6172 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%6174 = torch.aten.dequantize.self %6173 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%6175 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2030 = torch.constant.int 12
%6177 = torch.aten.item %6175 : !torch.vtensor<[],f32> -> !torch.float
%6178 = torch.aten.item %6176 : !torch.vtensor<[],si8> -> !torch.int
%6179 = torch.aten.quantize_per_tensor %185, %6177, %6178, %int12_2030 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6180 = torch.aten.int_repr %6179 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6181 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6183 = torch.aten.item %6181 : !torch.vtensor<[],f32> -> !torch.float
%6184 = torch.aten.item %6182 : !torch.vtensor<[],si8> -> !torch.int
%6185 = torch.aten._make_per_tensor_quantized_tensor %6180, %6183, %6184 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6186 = torch.aten.dequantize.self %6185 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int2_2031 = torch.constant.int 2
%int2_2032 = torch.constant.int 2
%int2_2033 = torch.constant.int 2
%int2_2034 = torch.constant.int 2
%int1_2035 = torch.constant.int 1
%int1_2036 = torch.constant.int 1
%int0_2037 = torch.constant.int 0
%6187 = torch.prim.ListConstruct %int2_2031, %int2_2032 : (!torch.int, !torch.int) -> !torch.list<int>
%6188 = torch.prim.ListConstruct %int2_2033, %int2_2034 : (!torch.int, !torch.int) -> !torch.list<int>
%6189 = torch.prim.ListConstruct %int1_2035, %int1_2036 : (!torch.int, !torch.int) -> !torch.list<int>
%6190 = torch.prim.ListConstruct %int0_2037, %int0_2037 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2038 = torch.constant.bool false
%int1_2039 = torch.constant.int 1
%6191 = torch.aten.convolution %6162, %6174, %6186, %6189, %6187, %6188, %false_2038, %6190, %int1_2039 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%6192 = torch.aten.relu %6191 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%6193 = torch.prim.ListConstruct %6192, %6162 : (!torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[1,32,10,10],f32>) -> !torch.list<vtensor>
%int1_2040 = torch.constant.int 1
%6194 = torch.aten.cat %6193, %int1_2040 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,64,10,10],f32>
%6195 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6196 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2041 = torch.constant.int 12
%6197 = torch.aten.item %6195 : !torch.vtensor<[],f32> -> !torch.float
%6198 = torch.aten.item %6196 : !torch.vtensor<[],si8> -> !torch.int
%6199 = torch.aten.quantize_per_tensor %6194, %6197, %6198, %int12_2041 : !torch.vtensor<[1,64,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%6200 = torch.aten.int_repr %6199 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],si8>
%6201 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6202 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6203 = torch.aten.item %6201 : !torch.vtensor<[],f32> -> !torch.float
%6204 = torch.aten.item %6202 : !torch.vtensor<[],si8> -> !torch.int
%6205 = torch.aten._make_per_tensor_quantized_tensor %6200, %6203, %6204 : !torch.vtensor<[1,64,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,10,10],!torch.qint8>
%6206 = torch.aten.dequantize.self %6205 : !torch.vtensor<[1,64,10,10],!torch.qint8> -> !torch.vtensor<[1,64,10,10],f32>
%6207 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6208 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2042 = torch.constant.int 12
%6209 = torch.aten.item %6207 : !torch.vtensor<[],f32> -> !torch.float
%6210 = torch.aten.item %6208 : !torch.vtensor<[],si8> -> !torch.int
%6211 = torch.aten.quantize_per_tensor %186, %6209, %6210, %int12_2042 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6212 = torch.aten.int_repr %6211 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%6213 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6214 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6215 = torch.aten.item %6213 : !torch.vtensor<[],f32> -> !torch.float
%6216 = torch.aten.item %6214 : !torch.vtensor<[],si8> -> !torch.int
%6217 = torch.aten._make_per_tensor_quantized_tensor %6212, %6215, %6216 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6218 = torch.aten.dequantize.self %6217 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%6219 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6220 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2043 = torch.constant.int 12
%6221 = torch.aten.item %6219 : !torch.vtensor<[],f32> -> !torch.float
%6222 = torch.aten.item %6220 : !torch.vtensor<[],si8> -> !torch.int
%6223 = torch.aten.quantize_per_tensor %187, %6221, %6222, %int12_2043 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6224 = torch.aten.int_repr %6223 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6225 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6226 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6227 = torch.aten.item %6225 : !torch.vtensor<[],f32> -> !torch.float
%6228 = torch.aten.item %6226 : !torch.vtensor<[],si8> -> !torch.int
%6229 = torch.aten._make_per_tensor_quantized_tensor %6224, %6227, %6228 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6230 = torch.aten.dequantize.self %6229 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_2044 = torch.constant.int 1
%int1_2045 = torch.constant.int 1
%int1_2046 = torch.constant.int 1
%int1_2047 = torch.constant.int 1
%int1_2048 = torch.constant.int 1
%int1_2049 = torch.constant.int 1
%int0_2050 = torch.constant.int 0
%6231 = torch.prim.ListConstruct %int1_2044, %int1_2045 : (!torch.int, !torch.int) -> !torch.list<int>
%6232 = torch.prim.ListConstruct %int1_2046, %int1_2047 : (!torch.int, !torch.int) -> !torch.list<int>
%6233 = torch.prim.ListConstruct %int1_2048, %int1_2049 : (!torch.int, !torch.int) -> !torch.list<int>
%6234 = torch.prim.ListConstruct %int0_2050, %int0_2050 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2051 = torch.constant.bool false
%int1_2052 = torch.constant.int 1
%6235 = torch.aten.convolution %6206, %6218, %6230, %6233, %6231, %6232, %false_2051, %6234, %int1_2052 : !torch.vtensor<[1,64,10,10],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%6236 = torch.aten.relu %6235 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[1,32,10,10],f32>
%6237 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6238 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2053 = torch.constant.int 12
%6239 = torch.aten.item %6237 : !torch.vtensor<[],f32> -> !torch.float
%6240 = torch.aten.item %6238 : !torch.vtensor<[],si8> -> !torch.int
%6241 = torch.aten.quantize_per_tensor %6236, %6239, %6240, %int12_2053 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6242 = torch.aten.int_repr %6241 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%6243 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6244 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6245 = torch.aten.item %6243 : !torch.vtensor<[],f32> -> !torch.float
%6246 = torch.aten.item %6244 : !torch.vtensor<[],si8> -> !torch.int
%6247 = torch.aten._make_per_tensor_quantized_tensor %6242, %6245, %6246 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%6248 = torch.aten.dequantize.self %6247 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%6249 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%6250 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%6251 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2054 = torch.constant.int 0
%int0_2055 = torch.constant.int 0
%int0_2056 = torch.constant.int 0
%6252 = torch.aten.select.int %6251, %int0_2054, %int0_2056 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6253 = torch.aten.item %6252 : !torch.vtensor<[1],si64> -> !torch.int
%6254 = torch.aten.lt.int %6253, %int0_2054 : !torch.int, !torch.int -> !torch.bool
%6255 = torch.aten.Int.bool %6254 : !torch.bool -> !torch.int
%6256 = torch.aten.mul.int %6255, %int0_2055 : !torch.int, !torch.int -> !torch.int
%6257 = torch.aten.add.int %6253, %6256 : !torch.int, !torch.int -> !torch.int
%6258 = torch.prim.ListConstruct %6257 : (!torch.int) -> !torch.list<int>
%false_2057 = torch.constant.bool false
%none_2058 = torch.constant.none
%6259 = torch.aten.tensor %6258, %none_2058, %none_2058, %false_2057 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2059, %indices_2060 = torch.aten.sort %6259, %int0_2054, %false_2057 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2061 = torch.constant.int 0
%6260 = torch.aten.select.int %values_2059, %int0_2054, %int0_2061 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6261 = torch.aten.item %6260 : !torch.vtensor<[1],si64> -> !torch.int
%6262 = torch.aten.unsqueeze %6249, %6261 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6263 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2062 = torch.constant.int 0
%int0_2063 = torch.constant.int 0
%int0_2064 = torch.constant.int 0
%6264 = torch.aten.select.int %6263, %int0_2062, %int0_2064 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6265 = torch.aten.item %6264 : !torch.vtensor<[1],si64> -> !torch.int
%6266 = torch.aten.lt.int %6265, %int0_2062 : !torch.int, !torch.int -> !torch.bool
%6267 = torch.aten.Int.bool %6266 : !torch.bool -> !torch.int
%6268 = torch.aten.mul.int %6267, %int0_2063 : !torch.int, !torch.int -> !torch.int
%6269 = torch.aten.add.int %6265, %6268 : !torch.int, !torch.int -> !torch.int
%6270 = torch.prim.ListConstruct %6269 : (!torch.int) -> !torch.list<int>
%false_2065 = torch.constant.bool false
%none_2066 = torch.constant.none
%6271 = torch.aten.tensor %6270, %none_2066, %none_2066, %false_2065 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2067, %indices_2068 = torch.aten.sort %6271, %int0_2062, %false_2065 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2069 = torch.constant.int 0
%6272 = torch.aten.select.int %values_2067, %int0_2062, %int0_2069 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6273 = torch.aten.item %6272 : !torch.vtensor<[1],si64> -> !torch.int
%6274 = torch.aten.unsqueeze %6250, %6273 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6275 = torch.prim.ListConstruct %6262, %6274 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2070 = torch.constant.int 0
%6276 = torch.aten.cat %6275, %int0_2070 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%6277 = torch.aten._shape_as_tensor %6248 : !torch.vtensor<[1,32,10,10],f32> -> !torch.vtensor<[4],si64>
%6278 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6279 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6280 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2071 = torch.constant.none
%int1_2072 = torch.constant.int 1
%6281 = torch.prim.ListConstruct %int1_2072 : (!torch.int) -> !torch.list<int>
%6282 = torch.aten.ones %6281, %none_2071, %none_2071, %none_2071, %none_2071 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2073 = torch.constant.int 0
%int0_2074 = torch.constant.int 0
%6283 = torch.prim.NumToTensor.Scalar %int0_2074 : !torch.int -> !torch.vtensor<[1],si64>
%6284 = torch.aten.index_select %6279, %int0_2073, %6283 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6285 = torch.aten.item %6284 : !torch.vtensor<[1],si64> -> !torch.int
%6286 = torch.aten.index_select %6280, %int0_2073, %6283 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6287 = torch.aten.item %6286 : !torch.vtensor<[1],si64> -> !torch.int
%6288 = torch.aten.index_select %6278, %int0_2073, %6283 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6289 = torch.aten.item %6288 : !torch.vtensor<[1],si64> -> !torch.int
%6290 = torch.aten.index_select %6282, %int0_2073, %6283 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6291 = torch.aten.item %6290 : !torch.vtensor<[1],si64> -> !torch.int
%6292 = torch.aten.slice.Tensor %6277, %6289, %6285, %6287, %6291 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2075 = torch.constant.int 4
%none_2076 = torch.constant.none
%false_2077 = torch.constant.bool false
%6293 = torch.aten.to.dtype %6276, %int4_2075, %false_2077, %false_2077, %none_2076 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%6294 = torch.prim.ListConstruct %6292, %6293 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2078 = torch.constant.int 0
%6295 = torch.aten.cat %6294, %int0_2078 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%6296 = torch.operator "onnx.Resize"(%6248, %none, %none, %6295) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%6297 = torch.prim.ListConstruct %6296, %6103 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,20,20],f32>) -> !torch.list<vtensor>
%int1_2079 = torch.constant.int 1
%6298 = torch.aten.cat %6297, %int1_2079 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%6299 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6300 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2080 = torch.constant.int 12
%6301 = torch.aten.item %6299 : !torch.vtensor<[],f32> -> !torch.float
%6302 = torch.aten.item %6300 : !torch.vtensor<[],si8> -> !torch.int
%6303 = torch.aten.quantize_per_tensor %6298, %6301, %6302, %int12_2080 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%6304 = torch.aten.int_repr %6303 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%6305 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6306 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6307 = torch.aten.item %6305 : !torch.vtensor<[],f32> -> !torch.float
%6308 = torch.aten.item %6306 : !torch.vtensor<[],si8> -> !torch.int
%6309 = torch.aten._make_per_tensor_quantized_tensor %6304, %6307, %6308 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%6310 = torch.aten.dequantize.self %6309 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%6311 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6312 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2081 = torch.constant.int 12
%6313 = torch.aten.item %6311 : !torch.vtensor<[],f32> -> !torch.float
%6314 = torch.aten.item %6312 : !torch.vtensor<[],si8> -> !torch.int
%6315 = torch.aten.quantize_per_tensor %188, %6313, %6314, %int12_2081 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6316 = torch.aten.int_repr %6315 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%6317 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6318 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6319 = torch.aten.item %6317 : !torch.vtensor<[],f32> -> !torch.float
%6320 = torch.aten.item %6318 : !torch.vtensor<[],si8> -> !torch.int
%6321 = torch.aten._make_per_tensor_quantized_tensor %6316, %6319, %6320 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6322 = torch.aten.dequantize.self %6321 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%6323 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6324 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2082 = torch.constant.int 12
%6325 = torch.aten.item %6323 : !torch.vtensor<[],f32> -> !torch.float
%6326 = torch.aten.item %6324 : !torch.vtensor<[],si8> -> !torch.int
%6327 = torch.aten.quantize_per_tensor %189, %6325, %6326, %int12_2082 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6328 = torch.aten.int_repr %6327 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6329 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6330 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6331 = torch.aten.item %6329 : !torch.vtensor<[],f32> -> !torch.float
%6332 = torch.aten.item %6330 : !torch.vtensor<[],si8> -> !torch.int
%6333 = torch.aten._make_per_tensor_quantized_tensor %6328, %6331, %6332 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6334 = torch.aten.dequantize.self %6333 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_2083 = torch.constant.int 1
%int1_2084 = torch.constant.int 1
%int1_2085 = torch.constant.int 1
%int1_2086 = torch.constant.int 1
%int1_2087 = torch.constant.int 1
%int1_2088 = torch.constant.int 1
%int0_2089 = torch.constant.int 0
%6335 = torch.prim.ListConstruct %int1_2083, %int1_2084 : (!torch.int, !torch.int) -> !torch.list<int>
%6336 = torch.prim.ListConstruct %int1_2085, %int1_2086 : (!torch.int, !torch.int) -> !torch.list<int>
%6337 = torch.prim.ListConstruct %int1_2087, %int1_2088 : (!torch.int, !torch.int) -> !torch.list<int>
%6338 = torch.prim.ListConstruct %int0_2089, %int0_2089 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2090 = torch.constant.bool false
%int1_2091 = torch.constant.int 1
%6339 = torch.aten.convolution %6310, %6322, %6334, %6337, %6335, %6336, %false_2090, %6338, %int1_2091 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,20,20],f32>
%6340 = torch.aten.relu %6339 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[1,32,20,20],f32>
%6341 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6342 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2092 = torch.constant.int 12
%6343 = torch.aten.item %6341 : !torch.vtensor<[],f32> -> !torch.float
%6344 = torch.aten.item %6342 : !torch.vtensor<[],si8> -> !torch.int
%6345 = torch.aten.quantize_per_tensor %6340, %6343, %6344, %int12_2092 : !torch.vtensor<[1,32,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6346 = torch.aten.int_repr %6345 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],si8>
%6347 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6349 = torch.aten.item %6347 : !torch.vtensor<[],f32> -> !torch.float
%6350 = torch.aten.item %6348 : !torch.vtensor<[],si8> -> !torch.int
%6351 = torch.aten._make_per_tensor_quantized_tensor %6346, %6349, %6350 : !torch.vtensor<[1,32,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,20,20],!torch.qint8>
%6352 = torch.aten.dequantize.self %6351 : !torch.vtensor<[1,32,20,20],!torch.qint8> -> !torch.vtensor<[1,32,20,20],f32>
%6353 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%6354 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%6355 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2093 = torch.constant.int 0
%int0_2094 = torch.constant.int 0
%int0_2095 = torch.constant.int 0
%6356 = torch.aten.select.int %6355, %int0_2093, %int0_2095 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6357 = torch.aten.item %6356 : !torch.vtensor<[1],si64> -> !torch.int
%6358 = torch.aten.lt.int %6357, %int0_2093 : !torch.int, !torch.int -> !torch.bool
%6359 = torch.aten.Int.bool %6358 : !torch.bool -> !torch.int
%6360 = torch.aten.mul.int %6359, %int0_2094 : !torch.int, !torch.int -> !torch.int
%6361 = torch.aten.add.int %6357, %6360 : !torch.int, !torch.int -> !torch.int
%6362 = torch.prim.ListConstruct %6361 : (!torch.int) -> !torch.list<int>
%false_2096 = torch.constant.bool false
%none_2097 = torch.constant.none
%6363 = torch.aten.tensor %6362, %none_2097, %none_2097, %false_2096 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2098, %indices_2099 = torch.aten.sort %6363, %int0_2093, %false_2096 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2100 = torch.constant.int 0
%6364 = torch.aten.select.int %values_2098, %int0_2093, %int0_2100 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6365 = torch.aten.item %6364 : !torch.vtensor<[1],si64> -> !torch.int
%6366 = torch.aten.unsqueeze %6353, %6365 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6367 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2101 = torch.constant.int 0
%int0_2102 = torch.constant.int 0
%int0_2103 = torch.constant.int 0
%6368 = torch.aten.select.int %6367, %int0_2101, %int0_2103 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6369 = torch.aten.item %6368 : !torch.vtensor<[1],si64> -> !torch.int
%6370 = torch.aten.lt.int %6369, %int0_2101 : !torch.int, !torch.int -> !torch.bool
%6371 = torch.aten.Int.bool %6370 : !torch.bool -> !torch.int
%6372 = torch.aten.mul.int %6371, %int0_2102 : !torch.int, !torch.int -> !torch.int
%6373 = torch.aten.add.int %6369, %6372 : !torch.int, !torch.int -> !torch.int
%6374 = torch.prim.ListConstruct %6373 : (!torch.int) -> !torch.list<int>
%false_2104 = torch.constant.bool false
%none_2105 = torch.constant.none
%6375 = torch.aten.tensor %6374, %none_2105, %none_2105, %false_2104 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2106, %indices_2107 = torch.aten.sort %6375, %int0_2101, %false_2104 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2108 = torch.constant.int 0
%6376 = torch.aten.select.int %values_2106, %int0_2101, %int0_2108 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6377 = torch.aten.item %6376 : !torch.vtensor<[1],si64> -> !torch.int
%6378 = torch.aten.unsqueeze %6354, %6377 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6379 = torch.prim.ListConstruct %6366, %6378 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2109 = torch.constant.int 0
%6380 = torch.aten.cat %6379, %int0_2109 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%6381 = torch.aten._shape_as_tensor %6352 : !torch.vtensor<[1,32,20,20],f32> -> !torch.vtensor<[4],si64>
%6382 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6383 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6384 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2110 = torch.constant.none
%int1_2111 = torch.constant.int 1
%6385 = torch.prim.ListConstruct %int1_2111 : (!torch.int) -> !torch.list<int>
%6386 = torch.aten.ones %6385, %none_2110, %none_2110, %none_2110, %none_2110 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2112 = torch.constant.int 0
%int0_2113 = torch.constant.int 0
%6387 = torch.prim.NumToTensor.Scalar %int0_2113 : !torch.int -> !torch.vtensor<[1],si64>
%6388 = torch.aten.index_select %6383, %int0_2112, %6387 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6389 = torch.aten.item %6388 : !torch.vtensor<[1],si64> -> !torch.int
%6390 = torch.aten.index_select %6384, %int0_2112, %6387 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6391 = torch.aten.item %6390 : !torch.vtensor<[1],si64> -> !torch.int
%6392 = torch.aten.index_select %6382, %int0_2112, %6387 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6393 = torch.aten.item %6392 : !torch.vtensor<[1],si64> -> !torch.int
%6394 = torch.aten.index_select %6386, %int0_2112, %6387 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6395 = torch.aten.item %6394 : !torch.vtensor<[1],si64> -> !torch.int
%6396 = torch.aten.slice.Tensor %6381, %6393, %6389, %6391, %6395 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2114 = torch.constant.int 4
%none_2115 = torch.constant.none
%false_2116 = torch.constant.bool false
%6397 = torch.aten.to.dtype %6380, %int4_2114, %false_2116, %false_2116, %none_2115 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%6398 = torch.prim.ListConstruct %6396, %6397 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2117 = torch.constant.int 0
%6399 = torch.aten.cat %6398, %int0_2117 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%6400 = torch.operator "onnx.Resize"(%6352, %none, %none, %6399) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%6401 = torch.prim.ListConstruct %6400, %6044 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,40,40],f32>) -> !torch.list<vtensor>
%int1_2118 = torch.constant.int 1
%6402 = torch.aten.cat %6401, %int1_2118 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%6403 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6404 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2119 = torch.constant.int 12
%6405 = torch.aten.item %6403 : !torch.vtensor<[],f32> -> !torch.float
%6406 = torch.aten.item %6404 : !torch.vtensor<[],si8> -> !torch.int
%6407 = torch.aten.quantize_per_tensor %6402, %6405, %6406, %int12_2119 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%6408 = torch.aten.int_repr %6407 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%6409 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6410 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6411 = torch.aten.item %6409 : !torch.vtensor<[],f32> -> !torch.float
%6412 = torch.aten.item %6410 : !torch.vtensor<[],si8> -> !torch.int
%6413 = torch.aten._make_per_tensor_quantized_tensor %6408, %6411, %6412 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%6414 = torch.aten.dequantize.self %6413 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%6415 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6416 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2120 = torch.constant.int 12
%6417 = torch.aten.item %6415 : !torch.vtensor<[],f32> -> !torch.float
%6418 = torch.aten.item %6416 : !torch.vtensor<[],si8> -> !torch.int
%6419 = torch.aten.quantize_per_tensor %190, %6417, %6418, %int12_2120 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6420 = torch.aten.int_repr %6419 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%6421 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6422 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6423 = torch.aten.item %6421 : !torch.vtensor<[],f32> -> !torch.float
%6424 = torch.aten.item %6422 : !torch.vtensor<[],si8> -> !torch.int
%6425 = torch.aten._make_per_tensor_quantized_tensor %6420, %6423, %6424 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6426 = torch.aten.dequantize.self %6425 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%6427 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6428 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2121 = torch.constant.int 12
%6429 = torch.aten.item %6427 : !torch.vtensor<[],f32> -> !torch.float
%6430 = torch.aten.item %6428 : !torch.vtensor<[],si8> -> !torch.int
%6431 = torch.aten.quantize_per_tensor %191, %6429, %6430, %int12_2121 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6432 = torch.aten.int_repr %6431 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6433 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6434 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6435 = torch.aten.item %6433 : !torch.vtensor<[],f32> -> !torch.float
%6436 = torch.aten.item %6434 : !torch.vtensor<[],si8> -> !torch.int
%6437 = torch.aten._make_per_tensor_quantized_tensor %6432, %6435, %6436 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6438 = torch.aten.dequantize.self %6437 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_2122 = torch.constant.int 1
%int1_2123 = torch.constant.int 1
%int1_2124 = torch.constant.int 1
%int1_2125 = torch.constant.int 1
%int1_2126 = torch.constant.int 1
%int1_2127 = torch.constant.int 1
%int0_2128 = torch.constant.int 0
%6439 = torch.prim.ListConstruct %int1_2122, %int1_2123 : (!torch.int, !torch.int) -> !torch.list<int>
%6440 = torch.prim.ListConstruct %int1_2124, %int1_2125 : (!torch.int, !torch.int) -> !torch.list<int>
%6441 = torch.prim.ListConstruct %int1_2126, %int1_2127 : (!torch.int, !torch.int) -> !torch.list<int>
%6442 = torch.prim.ListConstruct %int0_2128, %int0_2128 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2129 = torch.constant.bool false
%int1_2130 = torch.constant.int 1
%6443 = torch.aten.convolution %6414, %6426, %6438, %6441, %6439, %6440, %false_2129, %6442, %int1_2130 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,40,40],f32>
%6444 = torch.aten.relu %6443 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[1,32,40,40],f32>
%6445 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6446 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2131 = torch.constant.int 12
%6447 = torch.aten.item %6445 : !torch.vtensor<[],f32> -> !torch.float
%6448 = torch.aten.item %6446 : !torch.vtensor<[],si8> -> !torch.int
%6449 = torch.aten.quantize_per_tensor %6444, %6447, %6448, %int12_2131 : !torch.vtensor<[1,32,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%6450 = torch.aten.int_repr %6449 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],si8>
%6451 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6452 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6453 = torch.aten.item %6451 : !torch.vtensor<[],f32> -> !torch.float
%6454 = torch.aten.item %6452 : !torch.vtensor<[],si8> -> !torch.int
%6455 = torch.aten._make_per_tensor_quantized_tensor %6450, %6453, %6454 : !torch.vtensor<[1,32,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,40,40],!torch.qint8>
%6456 = torch.aten.dequantize.self %6455 : !torch.vtensor<[1,32,40,40],!torch.qint8> -> !torch.vtensor<[1,32,40,40],f32>
%6457 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%6458 = torch.vtensor.literal(dense<80> : tensor<si64>) : !torch.vtensor<[],si64>
%6459 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2132 = torch.constant.int 0
%int0_2133 = torch.constant.int 0
%int0_2134 = torch.constant.int 0
%6460 = torch.aten.select.int %6459, %int0_2132, %int0_2134 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6461 = torch.aten.item %6460 : !torch.vtensor<[1],si64> -> !torch.int
%6462 = torch.aten.lt.int %6461, %int0_2132 : !torch.int, !torch.int -> !torch.bool
%6463 = torch.aten.Int.bool %6462 : !torch.bool -> !torch.int
%6464 = torch.aten.mul.int %6463, %int0_2133 : !torch.int, !torch.int -> !torch.int
%6465 = torch.aten.add.int %6461, %6464 : !torch.int, !torch.int -> !torch.int
%6466 = torch.prim.ListConstruct %6465 : (!torch.int) -> !torch.list<int>
%false_2135 = torch.constant.bool false
%none_2136 = torch.constant.none
%6467 = torch.aten.tensor %6466, %none_2136, %none_2136, %false_2135 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2137, %indices_2138 = torch.aten.sort %6467, %int0_2132, %false_2135 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2139 = torch.constant.int 0
%6468 = torch.aten.select.int %values_2137, %int0_2132, %int0_2139 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6469 = torch.aten.item %6468 : !torch.vtensor<[1],si64> -> !torch.int
%6470 = torch.aten.unsqueeze %6457, %6469 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6471 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2140 = torch.constant.int 0
%int0_2141 = torch.constant.int 0
%int0_2142 = torch.constant.int 0
%6472 = torch.aten.select.int %6471, %int0_2140, %int0_2142 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6473 = torch.aten.item %6472 : !torch.vtensor<[1],si64> -> !torch.int
%6474 = torch.aten.lt.int %6473, %int0_2140 : !torch.int, !torch.int -> !torch.bool
%6475 = torch.aten.Int.bool %6474 : !torch.bool -> !torch.int
%6476 = torch.aten.mul.int %6475, %int0_2141 : !torch.int, !torch.int -> !torch.int
%6477 = torch.aten.add.int %6473, %6476 : !torch.int, !torch.int -> !torch.int
%6478 = torch.prim.ListConstruct %6477 : (!torch.int) -> !torch.list<int>
%false_2143 = torch.constant.bool false
%none_2144 = torch.constant.none
%6479 = torch.aten.tensor %6478, %none_2144, %none_2144, %false_2143 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2145, %indices_2146 = torch.aten.sort %6479, %int0_2140, %false_2143 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2147 = torch.constant.int 0
%6480 = torch.aten.select.int %values_2145, %int0_2140, %int0_2147 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6481 = torch.aten.item %6480 : !torch.vtensor<[1],si64> -> !torch.int
%6482 = torch.aten.unsqueeze %6458, %6481 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6483 = torch.prim.ListConstruct %6470, %6482 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2148 = torch.constant.int 0
%6484 = torch.aten.cat %6483, %int0_2148 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%6485 = torch.aten._shape_as_tensor %6456 : !torch.vtensor<[1,32,40,40],f32> -> !torch.vtensor<[4],si64>
%6486 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6487 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6488 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2149 = torch.constant.none
%int1_2150 = torch.constant.int 1
%6489 = torch.prim.ListConstruct %int1_2150 : (!torch.int) -> !torch.list<int>
%6490 = torch.aten.ones %6489, %none_2149, %none_2149, %none_2149, %none_2149 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2151 = torch.constant.int 0
%int0_2152 = torch.constant.int 0
%6491 = torch.prim.NumToTensor.Scalar %int0_2152 : !torch.int -> !torch.vtensor<[1],si64>
%6492 = torch.aten.index_select %6487, %int0_2151, %6491 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6493 = torch.aten.item %6492 : !torch.vtensor<[1],si64> -> !torch.int
%6494 = torch.aten.index_select %6488, %int0_2151, %6491 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6495 = torch.aten.item %6494 : !torch.vtensor<[1],si64> -> !torch.int
%6496 = torch.aten.index_select %6486, %int0_2151, %6491 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6497 = torch.aten.item %6496 : !torch.vtensor<[1],si64> -> !torch.int
%6498 = torch.aten.index_select %6490, %int0_2151, %6491 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6499 = torch.aten.item %6498 : !torch.vtensor<[1],si64> -> !torch.int
%6500 = torch.aten.slice.Tensor %6485, %6497, %6493, %6495, %6499 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2153 = torch.constant.int 4
%none_2154 = torch.constant.none
%false_2155 = torch.constant.bool false
%6501 = torch.aten.to.dtype %6484, %int4_2153, %false_2155, %false_2155, %none_2154 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%6502 = torch.prim.ListConstruct %6500, %6501 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2156 = torch.constant.int 0
%6503 = torch.aten.cat %6502, %int0_2156 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%6504 = torch.operator "onnx.Resize"(%6456, %none, %none, %6503) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,40,40],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%6505 = torch.prim.ListConstruct %6504, %5985 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,80,80],f32>) -> !torch.list<vtensor>
%int1_2157 = torch.constant.int 1
%6506 = torch.aten.cat %6505, %int1_2157 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,80,80],f32>
%6507 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6508 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2158 = torch.constant.int 12
%6509 = torch.aten.item %6507 : !torch.vtensor<[],f32> -> !torch.float
%6510 = torch.aten.item %6508 : !torch.vtensor<[],si8> -> !torch.int
%6511 = torch.aten.quantize_per_tensor %6506, %6509, %6510, %int12_2158 : !torch.vtensor<[1,?,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%6512 = torch.aten.int_repr %6511 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],si8>
%6513 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6514 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6515 = torch.aten.item %6513 : !torch.vtensor<[],f32> -> !torch.float
%6516 = torch.aten.item %6514 : !torch.vtensor<[],si8> -> !torch.int
%6517 = torch.aten._make_per_tensor_quantized_tensor %6512, %6515, %6516 : !torch.vtensor<[1,?,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,80,80],!torch.qint8>
%6518 = torch.aten.dequantize.self %6517 : !torch.vtensor<[1,?,80,80],!torch.qint8> -> !torch.vtensor<[1,?,80,80],f32>
%6519 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6520 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2159 = torch.constant.int 12
%6521 = torch.aten.item %6519 : !torch.vtensor<[],f32> -> !torch.float
%6522 = torch.aten.item %6520 : !torch.vtensor<[],si8> -> !torch.int
%6523 = torch.aten.quantize_per_tensor %192, %6521, %6522, %int12_2159 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6524 = torch.aten.int_repr %6523 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%6525 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%6526 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6527 = torch.aten.item %6525 : !torch.vtensor<[],f32> -> !torch.float
%6528 = torch.aten.item %6526 : !torch.vtensor<[],si8> -> !torch.int
%6529 = torch.aten._make_per_tensor_quantized_tensor %6524, %6527, %6528 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%6530 = torch.aten.dequantize.self %6529 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%6531 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6532 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2160 = torch.constant.int 12
%6533 = torch.aten.item %6531 : !torch.vtensor<[],f32> -> !torch.float
%6534 = torch.aten.item %6532 : !torch.vtensor<[],si8> -> !torch.int
%6535 = torch.aten.quantize_per_tensor %193, %6533, %6534, %int12_2160 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6536 = torch.aten.int_repr %6535 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%6537 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6538 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6539 = torch.aten.item %6537 : !torch.vtensor<[],f32> -> !torch.float
%6540 = torch.aten.item %6538 : !torch.vtensor<[],si8> -> !torch.int
%6541 = torch.aten._make_per_tensor_quantized_tensor %6536, %6539, %6540 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%6542 = torch.aten.dequantize.self %6541 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int1_2161 = torch.constant.int 1
%int1_2162 = torch.constant.int 1
%int1_2163 = torch.constant.int 1
%int1_2164 = torch.constant.int 1
%int1_2165 = torch.constant.int 1
%int1_2166 = torch.constant.int 1
%int0_2167 = torch.constant.int 0
%6543 = torch.prim.ListConstruct %int1_2161, %int1_2162 : (!torch.int, !torch.int) -> !torch.list<int>
%6544 = torch.prim.ListConstruct %int1_2163, %int1_2164 : (!torch.int, !torch.int) -> !torch.list<int>
%6545 = torch.prim.ListConstruct %int1_2165, %int1_2166 : (!torch.int, !torch.int) -> !torch.list<int>
%6546 = torch.prim.ListConstruct %int0_2167, %int0_2167 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2168 = torch.constant.bool false
%int1_2169 = torch.constant.int 1
%6547 = torch.aten.convolution %6518, %6530, %6542, %6545, %6543, %6544, %false_2168, %6546, %int1_2169 : !torch.vtensor<[1,?,80,80],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,80,80],f32>
%6548 = torch.aten.relu %6547 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[1,32,80,80],f32>
%6549 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6550 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2170 = torch.constant.int 12
%6551 = torch.aten.item %6549 : !torch.vtensor<[],f32> -> !torch.float
%6552 = torch.aten.item %6550 : !torch.vtensor<[],si8> -> !torch.int
%6553 = torch.aten.quantize_per_tensor %6548, %6551, %6552, %int12_2170 : !torch.vtensor<[1,32,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%6554 = torch.aten.int_repr %6553 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],si8>
%6555 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6556 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6557 = torch.aten.item %6555 : !torch.vtensor<[],f32> -> !torch.float
%6558 = torch.aten.item %6556 : !torch.vtensor<[],si8> -> !torch.int
%6559 = torch.aten._make_per_tensor_quantized_tensor %6554, %6557, %6558 : !torch.vtensor<[1,32,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,80,80],!torch.qint8>
%6560 = torch.aten.dequantize.self %6559 : !torch.vtensor<[1,32,80,80],!torch.qint8> -> !torch.vtensor<[1,32,80,80],f32>
%6561 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%6562 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
%6563 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2171 = torch.constant.int 0
%int0_2172 = torch.constant.int 0
%int0_2173 = torch.constant.int 0
%6564 = torch.aten.select.int %6563, %int0_2171, %int0_2173 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6565 = torch.aten.item %6564 : !torch.vtensor<[1],si64> -> !torch.int
%6566 = torch.aten.lt.int %6565, %int0_2171 : !torch.int, !torch.int -> !torch.bool
%6567 = torch.aten.Int.bool %6566 : !torch.bool -> !torch.int
%6568 = torch.aten.mul.int %6567, %int0_2172 : !torch.int, !torch.int -> !torch.int
%6569 = torch.aten.add.int %6565, %6568 : !torch.int, !torch.int -> !torch.int
%6570 = torch.prim.ListConstruct %6569 : (!torch.int) -> !torch.list<int>
%false_2174 = torch.constant.bool false
%none_2175 = torch.constant.none
%6571 = torch.aten.tensor %6570, %none_2175, %none_2175, %false_2174 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2176, %indices_2177 = torch.aten.sort %6571, %int0_2171, %false_2174 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2178 = torch.constant.int 0
%6572 = torch.aten.select.int %values_2176, %int0_2171, %int0_2178 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6573 = torch.aten.item %6572 : !torch.vtensor<[1],si64> -> !torch.int
%6574 = torch.aten.unsqueeze %6561, %6573 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6575 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2179 = torch.constant.int 0
%int0_2180 = torch.constant.int 0
%int0_2181 = torch.constant.int 0
%6576 = torch.aten.select.int %6575, %int0_2179, %int0_2181 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6577 = torch.aten.item %6576 : !torch.vtensor<[1],si64> -> !torch.int
%6578 = torch.aten.lt.int %6577, %int0_2179 : !torch.int, !torch.int -> !torch.bool
%6579 = torch.aten.Int.bool %6578 : !torch.bool -> !torch.int
%6580 = torch.aten.mul.int %6579, %int0_2180 : !torch.int, !torch.int -> !torch.int
%6581 = torch.aten.add.int %6577, %6580 : !torch.int, !torch.int -> !torch.int
%6582 = torch.prim.ListConstruct %6581 : (!torch.int) -> !torch.list<int>
%false_2182 = torch.constant.bool false
%none_2183 = torch.constant.none
%6583 = torch.aten.tensor %6582, %none_2183, %none_2183, %false_2182 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2184, %indices_2185 = torch.aten.sort %6583, %int0_2179, %false_2182 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2186 = torch.constant.int 0
%6584 = torch.aten.select.int %values_2184, %int0_2179, %int0_2186 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6585 = torch.aten.item %6584 : !torch.vtensor<[1],si64> -> !torch.int
%6586 = torch.aten.unsqueeze %6562, %6585 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6587 = torch.prim.ListConstruct %6574, %6586 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2187 = torch.constant.int 0
%6588 = torch.aten.cat %6587, %int0_2187 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%6589 = torch.aten._shape_as_tensor %6560 : !torch.vtensor<[1,32,80,80],f32> -> !torch.vtensor<[4],si64>
%6590 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6591 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6592 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2188 = torch.constant.none
%int1_2189 = torch.constant.int 1
%6593 = torch.prim.ListConstruct %int1_2189 : (!torch.int) -> !torch.list<int>
%6594 = torch.aten.ones %6593, %none_2188, %none_2188, %none_2188, %none_2188 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2190 = torch.constant.int 0
%int0_2191 = torch.constant.int 0
%6595 = torch.prim.NumToTensor.Scalar %int0_2191 : !torch.int -> !torch.vtensor<[1],si64>
%6596 = torch.aten.index_select %6591, %int0_2190, %6595 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6597 = torch.aten.item %6596 : !torch.vtensor<[1],si64> -> !torch.int
%6598 = torch.aten.index_select %6592, %int0_2190, %6595 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6599 = torch.aten.item %6598 : !torch.vtensor<[1],si64> -> !torch.int
%6600 = torch.aten.index_select %6590, %int0_2190, %6595 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6601 = torch.aten.item %6600 : !torch.vtensor<[1],si64> -> !torch.int
%6602 = torch.aten.index_select %6594, %int0_2190, %6595 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6603 = torch.aten.item %6602 : !torch.vtensor<[1],si64> -> !torch.int
%6604 = torch.aten.slice.Tensor %6589, %6601, %6597, %6599, %6603 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2192 = torch.constant.int 4
%none_2193 = torch.constant.none
%false_2194 = torch.constant.bool false
%6605 = torch.aten.to.dtype %6588, %int4_2192, %false_2194, %false_2194, %none_2193 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%6606 = torch.prim.ListConstruct %6604, %6605 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2195 = torch.constant.int 0
%6607 = torch.aten.cat %6606, %int0_2195 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%6608 = torch.operator "onnx.Resize"(%6560, %none, %none, %6607) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,32,80,80],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%6609 = torch.prim.ListConstruct %6608, %5926 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,32,160,160],f32>) -> !torch.list<vtensor>
%int1_2196 = torch.constant.int 1
%6610 = torch.aten.cat %6609, %int1_2196 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,160,160],f32>
%6611 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6612 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2197 = torch.constant.int 12
%6613 = torch.aten.item %6611 : !torch.vtensor<[],f32> -> !torch.float
%6614 = torch.aten.item %6612 : !torch.vtensor<[],si8> -> !torch.int
%6615 = torch.aten.quantize_per_tensor %6610, %6613, %6614, %int12_2197 : !torch.vtensor<[1,?,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%6616 = torch.aten.int_repr %6615 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],si8>
%6617 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6618 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6619 = torch.aten.item %6617 : !torch.vtensor<[],f32> -> !torch.float
%6620 = torch.aten.item %6618 : !torch.vtensor<[],si8> -> !torch.int
%6621 = torch.aten._make_per_tensor_quantized_tensor %6616, %6619, %6620 : !torch.vtensor<[1,?,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,160,160],!torch.qint8>
%6622 = torch.aten.dequantize.self %6621 : !torch.vtensor<[1,?,160,160],!torch.qint8> -> !torch.vtensor<[1,?,160,160],f32>
%6623 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6624 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2198 = torch.constant.int 12
%6625 = torch.aten.item %6623 : !torch.vtensor<[],f32> -> !torch.float
%6626 = torch.aten.item %6624 : !torch.vtensor<[],si8> -> !torch.int
%6627 = torch.aten.quantize_per_tensor %194, %6625, %6626, %int12_2198 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%6628 = torch.aten.int_repr %6627 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%6629 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6630 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6631 = torch.aten.item %6629 : !torch.vtensor<[],f32> -> !torch.float
%6632 = torch.aten.item %6630 : !torch.vtensor<[],si8> -> !torch.int
%6633 = torch.aten._make_per_tensor_quantized_tensor %6628, %6631, %6632 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%6634 = torch.aten.dequantize.self %6633 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%6635 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6636 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2199 = torch.constant.int 12
%6637 = torch.aten.item %6635 : !torch.vtensor<[],f32> -> !torch.float
%6638 = torch.aten.item %6636 : !torch.vtensor<[],si8> -> !torch.int
%6639 = torch.aten.quantize_per_tensor %195, %6637, %6638, %int12_2199 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%6640 = torch.aten.int_repr %6639 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%6641 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6642 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6643 = torch.aten.item %6641 : !torch.vtensor<[],f32> -> !torch.float
%6644 = torch.aten.item %6642 : !torch.vtensor<[],si8> -> !torch.int
%6645 = torch.aten._make_per_tensor_quantized_tensor %6640, %6643, %6644 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%6646 = torch.aten.dequantize.self %6645 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_2200 = torch.constant.int 1
%int1_2201 = torch.constant.int 1
%int1_2202 = torch.constant.int 1
%int1_2203 = torch.constant.int 1
%int1_2204 = torch.constant.int 1
%int1_2205 = torch.constant.int 1
%int0_2206 = torch.constant.int 0
%6647 = torch.prim.ListConstruct %int1_2200, %int1_2201 : (!torch.int, !torch.int) -> !torch.list<int>
%6648 = torch.prim.ListConstruct %int1_2202, %int1_2203 : (!torch.int, !torch.int) -> !torch.list<int>
%6649 = torch.prim.ListConstruct %int1_2204, %int1_2205 : (!torch.int, !torch.int) -> !torch.list<int>
%6650 = torch.prim.ListConstruct %int0_2206, %int0_2206 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2207 = torch.constant.bool false
%int1_2208 = torch.constant.int 1
%6651 = torch.aten.convolution %6622, %6634, %6646, %6649, %6647, %6648, %false_2207, %6650, %int1_2208 : !torch.vtensor<[1,?,160,160],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,160,160],f32>
%6652 = torch.aten.relu %6651 : !torch.vtensor<[1,64,160,160],f32> -> !torch.vtensor<[1,64,160,160],f32>
%6653 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6654 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2209 = torch.constant.int 12
%6655 = torch.aten.item %6653 : !torch.vtensor<[],f32> -> !torch.float
%6656 = torch.aten.item %6654 : !torch.vtensor<[],si8> -> !torch.int
%6657 = torch.aten.quantize_per_tensor %6652, %6655, %6656, %int12_2209 : !torch.vtensor<[1,64,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%6658 = torch.aten.int_repr %6657 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],si8>
%6659 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6660 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6661 = torch.aten.item %6659 : !torch.vtensor<[],f32> -> !torch.float
%6662 = torch.aten.item %6660 : !torch.vtensor<[],si8> -> !torch.int
%6663 = torch.aten._make_per_tensor_quantized_tensor %6658, %6661, %6662 : !torch.vtensor<[1,64,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%6664 = torch.aten.dequantize.self %6663 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],f32>
%int1_2210 = torch.constant.int 1
%6665 = torch.aten.add.Tensor %6664, %5884, %int1_2210 : !torch.vtensor<[1,64,160,160],f32>, !torch.vtensor<[1,64,160,160],f32>, !torch.int -> !torch.vtensor<[1,64,160,160],f32>
%6666 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6667 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2211 = torch.constant.int 12
%6668 = torch.aten.item %6666 : !torch.vtensor<[],f32> -> !torch.float
%6669 = torch.aten.item %6667 : !torch.vtensor<[],si8> -> !torch.int
%6670 = torch.aten.quantize_per_tensor %6665, %6668, %6669, %int12_2211 : !torch.vtensor<[1,64,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%6671 = torch.aten.int_repr %6670 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],si8>
%6672 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6673 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6674 = torch.aten.item %6672 : !torch.vtensor<[],f32> -> !torch.float
%6675 = torch.aten.item %6673 : !torch.vtensor<[],si8> -> !torch.int
%6676 = torch.aten._make_per_tensor_quantized_tensor %6671, %6674, %6675 : !torch.vtensor<[1,64,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,160,160],!torch.qint8>
%6677 = torch.aten.dequantize.self %6676 : !torch.vtensor<[1,64,160,160],!torch.qint8> -> !torch.vtensor<[1,64,160,160],f32>
%6678 = torch.vtensor.literal(dense<320> : tensor<si64>) : !torch.vtensor<[],si64>
%6679 = torch.vtensor.literal(dense<320> : tensor<si64>) : !torch.vtensor<[],si64>
%6680 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2212 = torch.constant.int 0
%int0_2213 = torch.constant.int 0
%int0_2214 = torch.constant.int 0
%6681 = torch.aten.select.int %6680, %int0_2212, %int0_2214 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6682 = torch.aten.item %6681 : !torch.vtensor<[1],si64> -> !torch.int
%6683 = torch.aten.lt.int %6682, %int0_2212 : !torch.int, !torch.int -> !torch.bool
%6684 = torch.aten.Int.bool %6683 : !torch.bool -> !torch.int
%6685 = torch.aten.mul.int %6684, %int0_2213 : !torch.int, !torch.int -> !torch.int
%6686 = torch.aten.add.int %6682, %6685 : !torch.int, !torch.int -> !torch.int
%6687 = torch.prim.ListConstruct %6686 : (!torch.int) -> !torch.list<int>
%false_2215 = torch.constant.bool false
%none_2216 = torch.constant.none
%6688 = torch.aten.tensor %6687, %none_2216, %none_2216, %false_2215 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2217, %indices_2218 = torch.aten.sort %6688, %int0_2212, %false_2215 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2219 = torch.constant.int 0
%6689 = torch.aten.select.int %values_2217, %int0_2212, %int0_2219 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6690 = torch.aten.item %6689 : !torch.vtensor<[1],si64> -> !torch.int
%6691 = torch.aten.unsqueeze %6678, %6690 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6692 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2220 = torch.constant.int 0
%int0_2221 = torch.constant.int 0
%int0_2222 = torch.constant.int 0
%6693 = torch.aten.select.int %6692, %int0_2220, %int0_2222 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6694 = torch.aten.item %6693 : !torch.vtensor<[1],si64> -> !torch.int
%6695 = torch.aten.lt.int %6694, %int0_2220 : !torch.int, !torch.int -> !torch.bool
%6696 = torch.aten.Int.bool %6695 : !torch.bool -> !torch.int
%6697 = torch.aten.mul.int %6696, %int0_2221 : !torch.int, !torch.int -> !torch.int
%6698 = torch.aten.add.int %6694, %6697 : !torch.int, !torch.int -> !torch.int
%6699 = torch.prim.ListConstruct %6698 : (!torch.int) -> !torch.list<int>
%false_2223 = torch.constant.bool false
%none_2224 = torch.constant.none
%6700 = torch.aten.tensor %6699, %none_2224, %none_2224, %false_2223 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2225, %indices_2226 = torch.aten.sort %6700, %int0_2220, %false_2223 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2227 = torch.constant.int 0
%6701 = torch.aten.select.int %values_2225, %int0_2220, %int0_2227 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%6702 = torch.aten.item %6701 : !torch.vtensor<[1],si64> -> !torch.int
%6703 = torch.aten.unsqueeze %6679, %6702 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%6704 = torch.prim.ListConstruct %6691, %6703 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2228 = torch.constant.int 0
%6705 = torch.aten.cat %6704, %int0_2228 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%6706 = torch.aten._shape_as_tensor %6677 : !torch.vtensor<[1,64,160,160],f32> -> !torch.vtensor<[4],si64>
%6707 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6708 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%6709 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2229 = torch.constant.none
%int1_2230 = torch.constant.int 1
%6710 = torch.prim.ListConstruct %int1_2230 : (!torch.int) -> !torch.list<int>
%6711 = torch.aten.ones %6710, %none_2229, %none_2229, %none_2229, %none_2229 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2231 = torch.constant.int 0
%int0_2232 = torch.constant.int 0
%6712 = torch.prim.NumToTensor.Scalar %int0_2232 : !torch.int -> !torch.vtensor<[1],si64>
%6713 = torch.aten.index_select %6708, %int0_2231, %6712 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6714 = torch.aten.item %6713 : !torch.vtensor<[1],si64> -> !torch.int
%6715 = torch.aten.index_select %6709, %int0_2231, %6712 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6716 = torch.aten.item %6715 : !torch.vtensor<[1],si64> -> !torch.int
%6717 = torch.aten.index_select %6707, %int0_2231, %6712 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6718 = torch.aten.item %6717 : !torch.vtensor<[1],si64> -> !torch.int
%6719 = torch.aten.index_select %6711, %int0_2231, %6712 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%6720 = torch.aten.item %6719 : !torch.vtensor<[1],si64> -> !torch.int
%6721 = torch.aten.slice.Tensor %6706, %6718, %6714, %6716, %6720 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2233 = torch.constant.int 4
%none_2234 = torch.constant.none
%false_2235 = torch.constant.bool false
%6722 = torch.aten.to.dtype %6705, %int4_2233, %false_2235, %false_2235, %none_2234 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%6723 = torch.prim.ListConstruct %6721, %6722 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2236 = torch.constant.int 0
%6724 = torch.aten.cat %6723, %int0_2236 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%6725 = torch.operator "onnx.Resize"(%6677, %none, %none, %6724) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,64,160,160],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%6726 = torch.prim.ListConstruct %6725, %1247 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,64,320,320],f32>) -> !torch.list<vtensor>
%int1_2237 = torch.constant.int 1
%6727 = torch.aten.cat %6726, %int1_2237 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,320,320],f32>
%6728 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%6729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2238 = torch.constant.int 12
%6730 = torch.aten.item %6728 : !torch.vtensor<[],f32> -> !torch.float
%6731 = torch.aten.item %6729 : !torch.vtensor<[],si8> -> !torch.int
%6732 = torch.aten.quantize_per_tensor %6727, %6730, %6731, %int12_2238 : !torch.vtensor<[1,?,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,320,320],!torch.qint8>
%6733 = torch.aten.int_repr %6732 : !torch.vtensor<[1,?,320,320],!torch.qint8> -> !torch.vtensor<[1,?,320,320],si8>
%6734 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%6735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6736 = torch.aten.item %6734 : !torch.vtensor<[],f32> -> !torch.float
%6737 = torch.aten.item %6735 : !torch.vtensor<[],si8> -> !torch.int
%6738 = torch.aten._make_per_tensor_quantized_tensor %6733, %6736, %6737 : !torch.vtensor<[1,?,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,320,320],!torch.qint8>
%6739 = torch.aten.dequantize.self %6738 : !torch.vtensor<[1,?,320,320],!torch.qint8> -> !torch.vtensor<[1,?,320,320],f32>
%6740 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2239 = torch.constant.int 12
%6742 = torch.aten.item %6740 : !torch.vtensor<[],f32> -> !torch.float
%6743 = torch.aten.item %6741 : !torch.vtensor<[],si8> -> !torch.int
%6744 = torch.aten.quantize_per_tensor %196, %6742, %6743, %int12_2239 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%6745 = torch.aten.int_repr %6744 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%6746 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6748 = torch.aten.item %6746 : !torch.vtensor<[],f32> -> !torch.float
%6749 = torch.aten.item %6747 : !torch.vtensor<[],si8> -> !torch.int
%6750 = torch.aten._make_per_tensor_quantized_tensor %6745, %6748, %6749 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%6751 = torch.aten.dequantize.self %6750 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%6752 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2240 = torch.constant.int 12
%6754 = torch.aten.item %6752 : !torch.vtensor<[],f32> -> !torch.float
%6755 = torch.aten.item %6753 : !torch.vtensor<[],si8> -> !torch.int
%6756 = torch.aten.quantize_per_tensor %197, %6754, %6755, %int12_2240 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%6757 = torch.aten.int_repr %6756 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%6758 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6759 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6760 = torch.aten.item %6758 : !torch.vtensor<[],f32> -> !torch.float
%6761 = torch.aten.item %6759 : !torch.vtensor<[],si8> -> !torch.int
%6762 = torch.aten._make_per_tensor_quantized_tensor %6757, %6760, %6761 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%6763 = torch.aten.dequantize.self %6762 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_2241 = torch.constant.int 1
%int1_2242 = torch.constant.int 1
%int1_2243 = torch.constant.int 1
%int1_2244 = torch.constant.int 1
%int1_2245 = torch.constant.int 1
%int1_2246 = torch.constant.int 1
%int0_2247 = torch.constant.int 0
%6764 = torch.prim.ListConstruct %int1_2241, %int1_2242 : (!torch.int, !torch.int) -> !torch.list<int>
%6765 = torch.prim.ListConstruct %int1_2243, %int1_2244 : (!torch.int, !torch.int) -> !torch.list<int>
%6766 = torch.prim.ListConstruct %int1_2245, %int1_2246 : (!torch.int, !torch.int) -> !torch.list<int>
%6767 = torch.prim.ListConstruct %int0_2247, %int0_2247 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2248 = torch.constant.bool false
%int1_2249 = torch.constant.int 1
%6768 = torch.aten.convolution %6739, %6751, %6763, %6766, %6764, %6765, %false_2248, %6767, %int1_2249 : !torch.vtensor<[1,?,320,320],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,320,320],f32>
%6769 = torch.aten.relu %6768 : !torch.vtensor<[1,64,320,320],f32> -> !torch.vtensor<[1,64,320,320],f32>
%6770 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2250 = torch.constant.int 12
%6772 = torch.aten.item %6770 : !torch.vtensor<[],f32> -> !torch.float
%6773 = torch.aten.item %6771 : !torch.vtensor<[],si8> -> !torch.int
%6774 = torch.aten.quantize_per_tensor %6769, %6772, %6773, %int12_2250 : !torch.vtensor<[1,64,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%6775 = torch.aten.int_repr %6774 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],si8>
%6776 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6777 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6778 = torch.aten.item %6776 : !torch.vtensor<[],f32> -> !torch.float
%6779 = torch.aten.item %6777 : !torch.vtensor<[],si8> -> !torch.int
%6780 = torch.aten._make_per_tensor_quantized_tensor %6775, %6778, %6779 : !torch.vtensor<[1,64,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,320,320],!torch.qint8>
%6781 = torch.aten.dequantize.self %6780 : !torch.vtensor<[1,64,320,320],!torch.qint8> -> !torch.vtensor<[1,64,320,320],f32>
%6782 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2251 = torch.constant.int 12
%6784 = torch.aten.item %6782 : !torch.vtensor<[],f32> -> !torch.float
%6785 = torch.aten.item %6783 : !torch.vtensor<[],si8> -> !torch.int
%6786 = torch.aten.quantize_per_tensor %198, %6784, %6785, %int12_2251 : !torch.vtensor<[16,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,64,3,3],!torch.qint8>
%6787 = torch.aten.int_repr %6786 : !torch.vtensor<[16,64,3,3],!torch.qint8> -> !torch.vtensor<[16,64,3,3],si8>
%6788 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6790 = torch.aten.item %6788 : !torch.vtensor<[],f32> -> !torch.float
%6791 = torch.aten.item %6789 : !torch.vtensor<[],si8> -> !torch.int
%6792 = torch.aten._make_per_tensor_quantized_tensor %6787, %6790, %6791 : !torch.vtensor<[16,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,64,3,3],!torch.qint8>
%6793 = torch.aten.dequantize.self %6792 : !torch.vtensor<[16,64,3,3],!torch.qint8> -> !torch.vtensor<[16,64,3,3],f32>
%6794 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2252 = torch.constant.int 12
%6796 = torch.aten.item %6794 : !torch.vtensor<[],f32> -> !torch.float
%6797 = torch.aten.item %6795 : !torch.vtensor<[],si8> -> !torch.int
%6798 = torch.aten.quantize_per_tensor %199, %6796, %6797, %int12_2252 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6799 = torch.aten.int_repr %6798 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%6800 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6802 = torch.aten.item %6800 : !torch.vtensor<[],f32> -> !torch.float
%6803 = torch.aten.item %6801 : !torch.vtensor<[],si8> -> !torch.int
%6804 = torch.aten._make_per_tensor_quantized_tensor %6799, %6802, %6803 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6805 = torch.aten.dequantize.self %6804 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2253 = torch.constant.int 1
%int1_2254 = torch.constant.int 1
%int1_2255 = torch.constant.int 1
%int1_2256 = torch.constant.int 1
%int1_2257 = torch.constant.int 1
%int1_2258 = torch.constant.int 1
%int0_2259 = torch.constant.int 0
%6806 = torch.prim.ListConstruct %int1_2253, %int1_2254 : (!torch.int, !torch.int) -> !torch.list<int>
%6807 = torch.prim.ListConstruct %int1_2255, %int1_2256 : (!torch.int, !torch.int) -> !torch.list<int>
%6808 = torch.prim.ListConstruct %int1_2257, %int1_2258 : (!torch.int, !torch.int) -> !torch.list<int>
%6809 = torch.prim.ListConstruct %int0_2259, %int0_2259 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2260 = torch.constant.bool false
%int1_2261 = torch.constant.int 1
%6810 = torch.aten.convolution %6781, %6793, %6805, %6808, %6806, %6807, %false_2260, %6809, %int1_2261 : !torch.vtensor<[1,64,320,320],f32>, !torch.vtensor<[16,64,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,320,320],f32>
%6811 = torch.aten.relu %6810 : !torch.vtensor<[1,16,320,320],f32> -> !torch.vtensor<[1,16,320,320],f32>
%6812 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6813 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2262 = torch.constant.int 12
%6814 = torch.aten.item %6812 : !torch.vtensor<[],f32> -> !torch.float
%6815 = torch.aten.item %6813 : !torch.vtensor<[],si8> -> !torch.int
%6816 = torch.aten.quantize_per_tensor %6811, %6814, %6815, %int12_2262 : !torch.vtensor<[1,16,320,320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,320,320],!torch.qint8>
%6817 = torch.aten.int_repr %6816 : !torch.vtensor<[1,16,320,320],!torch.qint8> -> !torch.vtensor<[1,16,320,320],si8>
%6818 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6819 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6820 = torch.aten.item %6818 : !torch.vtensor<[],f32> -> !torch.float
%6821 = torch.aten.item %6819 : !torch.vtensor<[],si8> -> !torch.int
%6822 = torch.aten._make_per_tensor_quantized_tensor %6817, %6820, %6821 : !torch.vtensor<[1,16,320,320],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,320,320],!torch.qint8>
%6823 = torch.aten.dequantize.self %6822 : !torch.vtensor<[1,16,320,320],!torch.qint8> -> !torch.vtensor<[1,16,320,320],f32>
%int2_2263 = torch.constant.int 2
%int2_2264 = torch.constant.int 2
%6824 = torch.prim.ListConstruct %int2_2263, %int2_2264 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2265 = torch.constant.int 0
%int0_2266 = torch.constant.int 0
%6825 = torch.prim.ListConstruct %int0_2265, %int0_2266 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2267 = torch.constant.int 2
%int2_2268 = torch.constant.int 2
%6826 = torch.prim.ListConstruct %int2_2267, %int2_2268 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2269 = torch.constant.int 1
%int1_2270 = torch.constant.int 1
%6827 = torch.prim.ListConstruct %int1_2269, %int1_2270 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2271 = torch.constant.bool true
%6828 = torch.aten.max_pool2d %6823, %6824, %6826, %6825, %6827, %true_2271 : !torch.vtensor<[1,16,320,320],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,16,160,160],f32>
%6829 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6830 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2272 = torch.constant.int 12
%6831 = torch.aten.item %6829 : !torch.vtensor<[],f32> -> !torch.float
%6832 = torch.aten.item %6830 : !torch.vtensor<[],si8> -> !torch.int
%6833 = torch.aten.quantize_per_tensor %6828, %6831, %6832, %int12_2272 : !torch.vtensor<[1,16,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,160,160],!torch.qint8>
%6834 = torch.aten.int_repr %6833 : !torch.vtensor<[1,16,160,160],!torch.qint8> -> !torch.vtensor<[1,16,160,160],si8>
%6835 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6836 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6837 = torch.aten.item %6835 : !torch.vtensor<[],f32> -> !torch.float
%6838 = torch.aten.item %6836 : !torch.vtensor<[],si8> -> !torch.int
%6839 = torch.aten._make_per_tensor_quantized_tensor %6834, %6837, %6838 : !torch.vtensor<[1,16,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,160,160],!torch.qint8>
%6840 = torch.aten.dequantize.self %6839 : !torch.vtensor<[1,16,160,160],!torch.qint8> -> !torch.vtensor<[1,16,160,160],f32>
%6841 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6842 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2273 = torch.constant.int 12
%6843 = torch.aten.item %6841 : !torch.vtensor<[],f32> -> !torch.float
%6844 = torch.aten.item %6842 : !torch.vtensor<[],si8> -> !torch.int
%6845 = torch.aten.quantize_per_tensor %200, %6843, %6844, %int12_2273 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6846 = torch.aten.int_repr %6845 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%6847 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6848 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6849 = torch.aten.item %6847 : !torch.vtensor<[],f32> -> !torch.float
%6850 = torch.aten.item %6848 : !torch.vtensor<[],si8> -> !torch.int
%6851 = torch.aten._make_per_tensor_quantized_tensor %6846, %6849, %6850 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6852 = torch.aten.dequantize.self %6851 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%6853 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6854 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2274 = torch.constant.int 12
%6855 = torch.aten.item %6853 : !torch.vtensor<[],f32> -> !torch.float
%6856 = torch.aten.item %6854 : !torch.vtensor<[],si8> -> !torch.int
%6857 = torch.aten.quantize_per_tensor %201, %6855, %6856, %int12_2274 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6858 = torch.aten.int_repr %6857 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%6859 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6860 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6861 = torch.aten.item %6859 : !torch.vtensor<[],f32> -> !torch.float
%6862 = torch.aten.item %6860 : !torch.vtensor<[],si8> -> !torch.int
%6863 = torch.aten._make_per_tensor_quantized_tensor %6858, %6861, %6862 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6864 = torch.aten.dequantize.self %6863 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2275 = torch.constant.int 1
%int1_2276 = torch.constant.int 1
%int1_2277 = torch.constant.int 1
%int1_2278 = torch.constant.int 1
%int1_2279 = torch.constant.int 1
%int1_2280 = torch.constant.int 1
%int0_2281 = torch.constant.int 0
%6865 = torch.prim.ListConstruct %int1_2275, %int1_2276 : (!torch.int, !torch.int) -> !torch.list<int>
%6866 = torch.prim.ListConstruct %int1_2277, %int1_2278 : (!torch.int, !torch.int) -> !torch.list<int>
%6867 = torch.prim.ListConstruct %int1_2279, %int1_2280 : (!torch.int, !torch.int) -> !torch.list<int>
%6868 = torch.prim.ListConstruct %int0_2281, %int0_2281 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2282 = torch.constant.bool false
%int1_2283 = torch.constant.int 1
%6869 = torch.aten.convolution %6840, %6852, %6864, %6867, %6865, %6866, %false_2282, %6868, %int1_2283 : !torch.vtensor<[1,16,160,160],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,160,160],f32>
%6870 = torch.aten.relu %6869 : !torch.vtensor<[1,16,160,160],f32> -> !torch.vtensor<[1,16,160,160],f32>
%6871 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6872 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2284 = torch.constant.int 12
%6873 = torch.aten.item %6871 : !torch.vtensor<[],f32> -> !torch.float
%6874 = torch.aten.item %6872 : !torch.vtensor<[],si8> -> !torch.int
%6875 = torch.aten.quantize_per_tensor %6870, %6873, %6874, %int12_2284 : !torch.vtensor<[1,16,160,160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,160,160],!torch.qint8>
%6876 = torch.aten.int_repr %6875 : !torch.vtensor<[1,16,160,160],!torch.qint8> -> !torch.vtensor<[1,16,160,160],si8>
%6877 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6878 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6879 = torch.aten.item %6877 : !torch.vtensor<[],f32> -> !torch.float
%6880 = torch.aten.item %6878 : !torch.vtensor<[],si8> -> !torch.int
%6881 = torch.aten._make_per_tensor_quantized_tensor %6876, %6879, %6880 : !torch.vtensor<[1,16,160,160],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,160,160],!torch.qint8>
%6882 = torch.aten.dequantize.self %6881 : !torch.vtensor<[1,16,160,160],!torch.qint8> -> !torch.vtensor<[1,16,160,160],f32>
%int2_2285 = torch.constant.int 2
%int2_2286 = torch.constant.int 2
%6883 = torch.prim.ListConstruct %int2_2285, %int2_2286 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2287 = torch.constant.int 0
%int0_2288 = torch.constant.int 0
%6884 = torch.prim.ListConstruct %int0_2287, %int0_2288 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2289 = torch.constant.int 2
%int2_2290 = torch.constant.int 2
%6885 = torch.prim.ListConstruct %int2_2289, %int2_2290 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2291 = torch.constant.int 1
%int1_2292 = torch.constant.int 1
%6886 = torch.prim.ListConstruct %int1_2291, %int1_2292 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2293 = torch.constant.bool true
%6887 = torch.aten.max_pool2d %6882, %6883, %6885, %6884, %6886, %true_2293 : !torch.vtensor<[1,16,160,160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,16,80,80],f32>
%6888 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6889 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2294 = torch.constant.int 12
%6890 = torch.aten.item %6888 : !torch.vtensor<[],f32> -> !torch.float
%6891 = torch.aten.item %6889 : !torch.vtensor<[],si8> -> !torch.int
%6892 = torch.aten.quantize_per_tensor %6887, %6890, %6891, %int12_2294 : !torch.vtensor<[1,16,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,80,80],!torch.qint8>
%6893 = torch.aten.int_repr %6892 : !torch.vtensor<[1,16,80,80],!torch.qint8> -> !torch.vtensor<[1,16,80,80],si8>
%6894 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6895 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6896 = torch.aten.item %6894 : !torch.vtensor<[],f32> -> !torch.float
%6897 = torch.aten.item %6895 : !torch.vtensor<[],si8> -> !torch.int
%6898 = torch.aten._make_per_tensor_quantized_tensor %6893, %6896, %6897 : !torch.vtensor<[1,16,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,80,80],!torch.qint8>
%6899 = torch.aten.dequantize.self %6898 : !torch.vtensor<[1,16,80,80],!torch.qint8> -> !torch.vtensor<[1,16,80,80],f32>
%6900 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6901 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2295 = torch.constant.int 12
%6902 = torch.aten.item %6900 : !torch.vtensor<[],f32> -> !torch.float
%6903 = torch.aten.item %6901 : !torch.vtensor<[],si8> -> !torch.int
%6904 = torch.aten.quantize_per_tensor %202, %6902, %6903, %int12_2295 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6905 = torch.aten.int_repr %6904 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%6906 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6907 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6908 = torch.aten.item %6906 : !torch.vtensor<[],f32> -> !torch.float
%6909 = torch.aten.item %6907 : !torch.vtensor<[],si8> -> !torch.int
%6910 = torch.aten._make_per_tensor_quantized_tensor %6905, %6908, %6909 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6911 = torch.aten.dequantize.self %6910 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%6912 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6913 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2296 = torch.constant.int 12
%6914 = torch.aten.item %6912 : !torch.vtensor<[],f32> -> !torch.float
%6915 = torch.aten.item %6913 : !torch.vtensor<[],si8> -> !torch.int
%6916 = torch.aten.quantize_per_tensor %203, %6914, %6915, %int12_2296 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6917 = torch.aten.int_repr %6916 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%6918 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6919 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6920 = torch.aten.item %6918 : !torch.vtensor<[],f32> -> !torch.float
%6921 = torch.aten.item %6919 : !torch.vtensor<[],si8> -> !torch.int
%6922 = torch.aten._make_per_tensor_quantized_tensor %6917, %6920, %6921 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6923 = torch.aten.dequantize.self %6922 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2297 = torch.constant.int 1
%int1_2298 = torch.constant.int 1
%int1_2299 = torch.constant.int 1
%int1_2300 = torch.constant.int 1
%int1_2301 = torch.constant.int 1
%int1_2302 = torch.constant.int 1
%int0_2303 = torch.constant.int 0
%6924 = torch.prim.ListConstruct %int1_2297, %int1_2298 : (!torch.int, !torch.int) -> !torch.list<int>
%6925 = torch.prim.ListConstruct %int1_2299, %int1_2300 : (!torch.int, !torch.int) -> !torch.list<int>
%6926 = torch.prim.ListConstruct %int1_2301, %int1_2302 : (!torch.int, !torch.int) -> !torch.list<int>
%6927 = torch.prim.ListConstruct %int0_2303, %int0_2303 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2304 = torch.constant.bool false
%int1_2305 = torch.constant.int 1
%6928 = torch.aten.convolution %6899, %6911, %6923, %6926, %6924, %6925, %false_2304, %6927, %int1_2305 : !torch.vtensor<[1,16,80,80],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,80,80],f32>
%6929 = torch.aten.relu %6928 : !torch.vtensor<[1,16,80,80],f32> -> !torch.vtensor<[1,16,80,80],f32>
%6930 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6931 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2306 = torch.constant.int 12
%6932 = torch.aten.item %6930 : !torch.vtensor<[],f32> -> !torch.float
%6933 = torch.aten.item %6931 : !torch.vtensor<[],si8> -> !torch.int
%6934 = torch.aten.quantize_per_tensor %6929, %6932, %6933, %int12_2306 : !torch.vtensor<[1,16,80,80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,80,80],!torch.qint8>
%6935 = torch.aten.int_repr %6934 : !torch.vtensor<[1,16,80,80],!torch.qint8> -> !torch.vtensor<[1,16,80,80],si8>
%6936 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6937 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6938 = torch.aten.item %6936 : !torch.vtensor<[],f32> -> !torch.float
%6939 = torch.aten.item %6937 : !torch.vtensor<[],si8> -> !torch.int
%6940 = torch.aten._make_per_tensor_quantized_tensor %6935, %6938, %6939 : !torch.vtensor<[1,16,80,80],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,80,80],!torch.qint8>
%6941 = torch.aten.dequantize.self %6940 : !torch.vtensor<[1,16,80,80],!torch.qint8> -> !torch.vtensor<[1,16,80,80],f32>
%int2_2307 = torch.constant.int 2
%int2_2308 = torch.constant.int 2
%6942 = torch.prim.ListConstruct %int2_2307, %int2_2308 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2309 = torch.constant.int 0
%int0_2310 = torch.constant.int 0
%6943 = torch.prim.ListConstruct %int0_2309, %int0_2310 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2311 = torch.constant.int 2
%int2_2312 = torch.constant.int 2
%6944 = torch.prim.ListConstruct %int2_2311, %int2_2312 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2313 = torch.constant.int 1
%int1_2314 = torch.constant.int 1
%6945 = torch.prim.ListConstruct %int1_2313, %int1_2314 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2315 = torch.constant.bool true
%6946 = torch.aten.max_pool2d %6941, %6942, %6944, %6943, %6945, %true_2315 : !torch.vtensor<[1,16,80,80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,16,40,40],f32>
%6947 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6948 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2316 = torch.constant.int 12
%6949 = torch.aten.item %6947 : !torch.vtensor<[],f32> -> !torch.float
%6950 = torch.aten.item %6948 : !torch.vtensor<[],si8> -> !torch.int
%6951 = torch.aten.quantize_per_tensor %6946, %6949, %6950, %int12_2316 : !torch.vtensor<[1,16,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,40,40],!torch.qint8>
%6952 = torch.aten.int_repr %6951 : !torch.vtensor<[1,16,40,40],!torch.qint8> -> !torch.vtensor<[1,16,40,40],si8>
%6953 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6954 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6955 = torch.aten.item %6953 : !torch.vtensor<[],f32> -> !torch.float
%6956 = torch.aten.item %6954 : !torch.vtensor<[],si8> -> !torch.int
%6957 = torch.aten._make_per_tensor_quantized_tensor %6952, %6955, %6956 : !torch.vtensor<[1,16,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,40,40],!torch.qint8>
%6958 = torch.aten.dequantize.self %6957 : !torch.vtensor<[1,16,40,40],!torch.qint8> -> !torch.vtensor<[1,16,40,40],f32>
%6959 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6960 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2317 = torch.constant.int 12
%6961 = torch.aten.item %6959 : !torch.vtensor<[],f32> -> !torch.float
%6962 = torch.aten.item %6960 : !torch.vtensor<[],si8> -> !torch.int
%6963 = torch.aten.quantize_per_tensor %204, %6961, %6962, %int12_2317 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6964 = torch.aten.int_repr %6963 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%6965 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%6966 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6967 = torch.aten.item %6965 : !torch.vtensor<[],f32> -> !torch.float
%6968 = torch.aten.item %6966 : !torch.vtensor<[],si8> -> !torch.int
%6969 = torch.aten._make_per_tensor_quantized_tensor %6964, %6967, %6968 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%6970 = torch.aten.dequantize.self %6969 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%6971 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6972 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2318 = torch.constant.int 12
%6973 = torch.aten.item %6971 : !torch.vtensor<[],f32> -> !torch.float
%6974 = torch.aten.item %6972 : !torch.vtensor<[],si8> -> !torch.int
%6975 = torch.aten.quantize_per_tensor %205, %6973, %6974, %int12_2318 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6976 = torch.aten.int_repr %6975 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%6977 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6978 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6979 = torch.aten.item %6977 : !torch.vtensor<[],f32> -> !torch.float
%6980 = torch.aten.item %6978 : !torch.vtensor<[],si8> -> !torch.int
%6981 = torch.aten._make_per_tensor_quantized_tensor %6976, %6979, %6980 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%6982 = torch.aten.dequantize.self %6981 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2319 = torch.constant.int 1
%int1_2320 = torch.constant.int 1
%int1_2321 = torch.constant.int 1
%int1_2322 = torch.constant.int 1
%int1_2323 = torch.constant.int 1
%int1_2324 = torch.constant.int 1
%int0_2325 = torch.constant.int 0
%6983 = torch.prim.ListConstruct %int1_2319, %int1_2320 : (!torch.int, !torch.int) -> !torch.list<int>
%6984 = torch.prim.ListConstruct %int1_2321, %int1_2322 : (!torch.int, !torch.int) -> !torch.list<int>
%6985 = torch.prim.ListConstruct %int1_2323, %int1_2324 : (!torch.int, !torch.int) -> !torch.list<int>
%6986 = torch.prim.ListConstruct %int0_2325, %int0_2325 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2326 = torch.constant.bool false
%int1_2327 = torch.constant.int 1
%6987 = torch.aten.convolution %6958, %6970, %6982, %6985, %6983, %6984, %false_2326, %6986, %int1_2327 : !torch.vtensor<[1,16,40,40],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,40,40],f32>
%6988 = torch.aten.relu %6987 : !torch.vtensor<[1,16,40,40],f32> -> !torch.vtensor<[1,16,40,40],f32>
%6989 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6990 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2328 = torch.constant.int 12
%6991 = torch.aten.item %6989 : !torch.vtensor<[],f32> -> !torch.float
%6992 = torch.aten.item %6990 : !torch.vtensor<[],si8> -> !torch.int
%6993 = torch.aten.quantize_per_tensor %6988, %6991, %6992, %int12_2328 : !torch.vtensor<[1,16,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,40,40],!torch.qint8>
%6994 = torch.aten.int_repr %6993 : !torch.vtensor<[1,16,40,40],!torch.qint8> -> !torch.vtensor<[1,16,40,40],si8>
%6995 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%6996 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%6997 = torch.aten.item %6995 : !torch.vtensor<[],f32> -> !torch.float
%6998 = torch.aten.item %6996 : !torch.vtensor<[],si8> -> !torch.int
%6999 = torch.aten._make_per_tensor_quantized_tensor %6994, %6997, %6998 : !torch.vtensor<[1,16,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,40,40],!torch.qint8>
%7000 = torch.aten.dequantize.self %6999 : !torch.vtensor<[1,16,40,40],!torch.qint8> -> !torch.vtensor<[1,16,40,40],f32>
%int2_2329 = torch.constant.int 2
%int2_2330 = torch.constant.int 2
%7001 = torch.prim.ListConstruct %int2_2329, %int2_2330 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2331 = torch.constant.int 0
%int0_2332 = torch.constant.int 0
%7002 = torch.prim.ListConstruct %int0_2331, %int0_2332 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2333 = torch.constant.int 2
%int2_2334 = torch.constant.int 2
%7003 = torch.prim.ListConstruct %int2_2333, %int2_2334 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2335 = torch.constant.int 1
%int1_2336 = torch.constant.int 1
%7004 = torch.prim.ListConstruct %int1_2335, %int1_2336 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2337 = torch.constant.bool true
%7005 = torch.aten.max_pool2d %7000, %7001, %7003, %7002, %7004, %true_2337 : !torch.vtensor<[1,16,40,40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,16,20,20],f32>
%7006 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7007 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2338 = torch.constant.int 12
%7008 = torch.aten.item %7006 : !torch.vtensor<[],f32> -> !torch.float
%7009 = torch.aten.item %7007 : !torch.vtensor<[],si8> -> !torch.int
%7010 = torch.aten.quantize_per_tensor %7005, %7008, %7009, %int12_2338 : !torch.vtensor<[1,16,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7011 = torch.aten.int_repr %7010 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],si8>
%7012 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7014 = torch.aten.item %7012 : !torch.vtensor<[],f32> -> !torch.float
%7015 = torch.aten.item %7013 : !torch.vtensor<[],si8> -> !torch.int
%7016 = torch.aten._make_per_tensor_quantized_tensor %7011, %7014, %7015 : !torch.vtensor<[1,16,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7017 = torch.aten.dequantize.self %7016 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],f32>
%7018 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2339 = torch.constant.int 12
%7020 = torch.aten.item %7018 : !torch.vtensor<[],f32> -> !torch.float
%7021 = torch.aten.item %7019 : !torch.vtensor<[],si8> -> !torch.int
%7022 = torch.aten.quantize_per_tensor %206, %7020, %7021, %int12_2339 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7023 = torch.aten.int_repr %7022 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%7024 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7026 = torch.aten.item %7024 : !torch.vtensor<[],f32> -> !torch.float
%7027 = torch.aten.item %7025 : !torch.vtensor<[],si8> -> !torch.int
%7028 = torch.aten._make_per_tensor_quantized_tensor %7023, %7026, %7027 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7029 = torch.aten.dequantize.self %7028 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%7030 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7031 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2340 = torch.constant.int 12
%7032 = torch.aten.item %7030 : !torch.vtensor<[],f32> -> !torch.float
%7033 = torch.aten.item %7031 : !torch.vtensor<[],si8> -> !torch.int
%7034 = torch.aten.quantize_per_tensor %207, %7032, %7033, %int12_2340 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7035 = torch.aten.int_repr %7034 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7036 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7037 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7038 = torch.aten.item %7036 : !torch.vtensor<[],f32> -> !torch.float
%7039 = torch.aten.item %7037 : !torch.vtensor<[],si8> -> !torch.int
%7040 = torch.aten._make_per_tensor_quantized_tensor %7035, %7038, %7039 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7041 = torch.aten.dequantize.self %7040 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2341 = torch.constant.int 1
%int1_2342 = torch.constant.int 1
%int1_2343 = torch.constant.int 1
%int1_2344 = torch.constant.int 1
%int1_2345 = torch.constant.int 1
%int1_2346 = torch.constant.int 1
%int0_2347 = torch.constant.int 0
%7042 = torch.prim.ListConstruct %int1_2341, %int1_2342 : (!torch.int, !torch.int) -> !torch.list<int>
%7043 = torch.prim.ListConstruct %int1_2343, %int1_2344 : (!torch.int, !torch.int) -> !torch.list<int>
%7044 = torch.prim.ListConstruct %int1_2345, %int1_2346 : (!torch.int, !torch.int) -> !torch.list<int>
%7045 = torch.prim.ListConstruct %int0_2347, %int0_2347 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2348 = torch.constant.bool false
%int1_2349 = torch.constant.int 1
%7046 = torch.aten.convolution %7017, %7029, %7041, %7044, %7042, %7043, %false_2348, %7045, %int1_2349 : !torch.vtensor<[1,16,20,20],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,20,20],f32>
%7047 = torch.aten.relu %7046 : !torch.vtensor<[1,16,20,20],f32> -> !torch.vtensor<[1,16,20,20],f32>
%7048 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2350 = torch.constant.int 12
%7050 = torch.aten.item %7048 : !torch.vtensor<[],f32> -> !torch.float
%7051 = torch.aten.item %7049 : !torch.vtensor<[],si8> -> !torch.int
%7052 = torch.aten.quantize_per_tensor %7047, %7050, %7051, %int12_2350 : !torch.vtensor<[1,16,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7053 = torch.aten.int_repr %7052 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],si8>
%7054 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7056 = torch.aten.item %7054 : !torch.vtensor<[],f32> -> !torch.float
%7057 = torch.aten.item %7055 : !torch.vtensor<[],si8> -> !torch.int
%7058 = torch.aten._make_per_tensor_quantized_tensor %7053, %7056, %7057 : !torch.vtensor<[1,16,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7059 = torch.aten.dequantize.self %7058 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],f32>
%int2_2351 = torch.constant.int 2
%int2_2352 = torch.constant.int 2
%7060 = torch.prim.ListConstruct %int2_2351, %int2_2352 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_2353 = torch.constant.int 0
%int0_2354 = torch.constant.int 0
%7061 = torch.prim.ListConstruct %int0_2353, %int0_2354 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_2355 = torch.constant.int 2
%int2_2356 = torch.constant.int 2
%7062 = torch.prim.ListConstruct %int2_2355, %int2_2356 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_2357 = torch.constant.int 1
%int1_2358 = torch.constant.int 1
%7063 = torch.prim.ListConstruct %int1_2357, %int1_2358 : (!torch.int, !torch.int) -> !torch.list<int>
%true_2359 = torch.constant.bool true
%7064 = torch.aten.max_pool2d %7059, %7060, %7062, %7061, %7063, %true_2359 : !torch.vtensor<[1,16,20,20],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,16,10,10],f32>
%7065 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7066 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2360 = torch.constant.int 12
%7067 = torch.aten.item %7065 : !torch.vtensor<[],f32> -> !torch.float
%7068 = torch.aten.item %7066 : !torch.vtensor<[],si8> -> !torch.int
%7069 = torch.aten.quantize_per_tensor %7064, %7067, %7068, %int12_2360 : !torch.vtensor<[1,16,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7070 = torch.aten.int_repr %7069 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],si8>
%7071 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7072 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7073 = torch.aten.item %7071 : !torch.vtensor<[],f32> -> !torch.float
%7074 = torch.aten.item %7072 : !torch.vtensor<[],si8> -> !torch.int
%7075 = torch.aten._make_per_tensor_quantized_tensor %7070, %7073, %7074 : !torch.vtensor<[1,16,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7076 = torch.aten.dequantize.self %7075 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],f32>
%7077 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%7078 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2361 = torch.constant.int 12
%7079 = torch.aten.item %7077 : !torch.vtensor<[],f32> -> !torch.float
%7080 = torch.aten.item %7078 : !torch.vtensor<[],si8> -> !torch.int
%7081 = torch.aten.quantize_per_tensor %208, %7079, %7080, %int12_2361 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7082 = torch.aten.int_repr %7081 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%7083 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%7084 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7085 = torch.aten.item %7083 : !torch.vtensor<[],f32> -> !torch.float
%7086 = torch.aten.item %7084 : !torch.vtensor<[],si8> -> !torch.int
%7087 = torch.aten._make_per_tensor_quantized_tensor %7082, %7085, %7086 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7088 = torch.aten.dequantize.self %7087 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%7089 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7090 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2362 = torch.constant.int 12
%7091 = torch.aten.item %7089 : !torch.vtensor<[],f32> -> !torch.float
%7092 = torch.aten.item %7090 : !torch.vtensor<[],si8> -> !torch.int
%7093 = torch.aten.quantize_per_tensor %209, %7091, %7092, %int12_2362 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7094 = torch.aten.int_repr %7093 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7095 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7096 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7097 = torch.aten.item %7095 : !torch.vtensor<[],f32> -> !torch.float
%7098 = torch.aten.item %7096 : !torch.vtensor<[],si8> -> !torch.int
%7099 = torch.aten._make_per_tensor_quantized_tensor %7094, %7097, %7098 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7100 = torch.aten.dequantize.self %7099 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2363 = torch.constant.int 1
%int1_2364 = torch.constant.int 1
%int1_2365 = torch.constant.int 1
%int1_2366 = torch.constant.int 1
%int1_2367 = torch.constant.int 1
%int1_2368 = torch.constant.int 1
%int0_2369 = torch.constant.int 0
%7101 = torch.prim.ListConstruct %int1_2363, %int1_2364 : (!torch.int, !torch.int) -> !torch.list<int>
%7102 = torch.prim.ListConstruct %int1_2365, %int1_2366 : (!torch.int, !torch.int) -> !torch.list<int>
%7103 = torch.prim.ListConstruct %int1_2367, %int1_2368 : (!torch.int, !torch.int) -> !torch.list<int>
%7104 = torch.prim.ListConstruct %int0_2369, %int0_2369 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2370 = torch.constant.bool false
%int1_2371 = torch.constant.int 1
%7105 = torch.aten.convolution %7076, %7088, %7100, %7103, %7101, %7102, %false_2370, %7104, %int1_2371 : !torch.vtensor<[1,16,10,10],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,10,10],f32>
%7106 = torch.aten.relu %7105 : !torch.vtensor<[1,16,10,10],f32> -> !torch.vtensor<[1,16,10,10],f32>
%7107 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7108 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2372 = torch.constant.int 12
%7109 = torch.aten.item %7107 : !torch.vtensor<[],f32> -> !torch.float
%7110 = torch.aten.item %7108 : !torch.vtensor<[],si8> -> !torch.int
%7111 = torch.aten.quantize_per_tensor %7106, %7109, %7110, %int12_2372 : !torch.vtensor<[1,16,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7112 = torch.aten.int_repr %7111 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],si8>
%7113 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7114 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7115 = torch.aten.item %7113 : !torch.vtensor<[],f32> -> !torch.float
%7116 = torch.aten.item %7114 : !torch.vtensor<[],si8> -> !torch.int
%7117 = torch.aten._make_per_tensor_quantized_tensor %7112, %7115, %7116 : !torch.vtensor<[1,16,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7118 = torch.aten.dequantize.self %7117 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],f32>
%7119 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2373 = torch.constant.int 12
%7121 = torch.aten.item %7119 : !torch.vtensor<[],f32> -> !torch.float
%7122 = torch.aten.item %7120 : !torch.vtensor<[],si8> -> !torch.int
%7123 = torch.aten.quantize_per_tensor %210, %7121, %7122, %int12_2373 : !torch.vtensor<[16,16,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7124 = torch.aten.int_repr %7123 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],si8>
%7125 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7127 = torch.aten.item %7125 : !torch.vtensor<[],f32> -> !torch.float
%7128 = torch.aten.item %7126 : !torch.vtensor<[],si8> -> !torch.int
%7129 = torch.aten._make_per_tensor_quantized_tensor %7124, %7127, %7128 : !torch.vtensor<[16,16,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,3,3],!torch.qint8>
%7130 = torch.aten.dequantize.self %7129 : !torch.vtensor<[16,16,3,3],!torch.qint8> -> !torch.vtensor<[16,16,3,3],f32>
%7131 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7132 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2374 = torch.constant.int 12
%7133 = torch.aten.item %7131 : !torch.vtensor<[],f32> -> !torch.float
%7134 = torch.aten.item %7132 : !torch.vtensor<[],si8> -> !torch.int
%7135 = torch.aten.quantize_per_tensor %211, %7133, %7134, %int12_2374 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7136 = torch.aten.int_repr %7135 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7137 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7138 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7139 = torch.aten.item %7137 : !torch.vtensor<[],f32> -> !torch.float
%7140 = torch.aten.item %7138 : !torch.vtensor<[],si8> -> !torch.int
%7141 = torch.aten._make_per_tensor_quantized_tensor %7136, %7139, %7140 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7142 = torch.aten.dequantize.self %7141 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int2_2375 = torch.constant.int 2
%int2_2376 = torch.constant.int 2
%int2_2377 = torch.constant.int 2
%int2_2378 = torch.constant.int 2
%int1_2379 = torch.constant.int 1
%int1_2380 = torch.constant.int 1
%int0_2381 = torch.constant.int 0
%7143 = torch.prim.ListConstruct %int2_2375, %int2_2376 : (!torch.int, !torch.int) -> !torch.list<int>
%7144 = torch.prim.ListConstruct %int2_2377, %int2_2378 : (!torch.int, !torch.int) -> !torch.list<int>
%7145 = torch.prim.ListConstruct %int1_2379, %int1_2380 : (!torch.int, !torch.int) -> !torch.list<int>
%7146 = torch.prim.ListConstruct %int0_2381, %int0_2381 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2382 = torch.constant.bool false
%int1_2383 = torch.constant.int 1
%7147 = torch.aten.convolution %7118, %7130, %7142, %7145, %7143, %7144, %false_2382, %7146, %int1_2383 : !torch.vtensor<[1,16,10,10],f32>, !torch.vtensor<[16,16,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,10,10],f32>
%7148 = torch.aten.relu %7147 : !torch.vtensor<[1,16,10,10],f32> -> !torch.vtensor<[1,16,10,10],f32>
%7149 = torch.prim.ListConstruct %7148, %7118 : (!torch.vtensor<[1,16,10,10],f32>, !torch.vtensor<[1,16,10,10],f32>) -> !torch.list<vtensor>
%int1_2384 = torch.constant.int 1
%7150 = torch.aten.cat %7149, %int1_2384 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,32,10,10],f32>
%7151 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2385 = torch.constant.int 12
%7153 = torch.aten.item %7151 : !torch.vtensor<[],f32> -> !torch.float
%7154 = torch.aten.item %7152 : !torch.vtensor<[],si8> -> !torch.int
%7155 = torch.aten.quantize_per_tensor %7150, %7153, %7154, %int12_2385 : !torch.vtensor<[1,32,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%7156 = torch.aten.int_repr %7155 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],si8>
%7157 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7159 = torch.aten.item %7157 : !torch.vtensor<[],f32> -> !torch.float
%7160 = torch.aten.item %7158 : !torch.vtensor<[],si8> -> !torch.int
%7161 = torch.aten._make_per_tensor_quantized_tensor %7156, %7159, %7160 : !torch.vtensor<[1,32,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,10,10],!torch.qint8>
%7162 = torch.aten.dequantize.self %7161 : !torch.vtensor<[1,32,10,10],!torch.qint8> -> !torch.vtensor<[1,32,10,10],f32>
%7163 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2386 = torch.constant.int 12
%7165 = torch.aten.item %7163 : !torch.vtensor<[],f32> -> !torch.float
%7166 = torch.aten.item %7164 : !torch.vtensor<[],si8> -> !torch.int
%7167 = torch.aten.quantize_per_tensor %212, %7165, %7166, %int12_2386 : !torch.vtensor<[16,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7168 = torch.aten.int_repr %7167 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],si8>
%7169 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7171 = torch.aten.item %7169 : !torch.vtensor<[],f32> -> !torch.float
%7172 = torch.aten.item %7170 : !torch.vtensor<[],si8> -> !torch.int
%7173 = torch.aten._make_per_tensor_quantized_tensor %7168, %7171, %7172 : !torch.vtensor<[16,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7174 = torch.aten.dequantize.self %7173 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],f32>
%7175 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2387 = torch.constant.int 12
%7177 = torch.aten.item %7175 : !torch.vtensor<[],f32> -> !torch.float
%7178 = torch.aten.item %7176 : !torch.vtensor<[],si8> -> !torch.int
%7179 = torch.aten.quantize_per_tensor %213, %7177, %7178, %int12_2387 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7180 = torch.aten.int_repr %7179 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7181 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7183 = torch.aten.item %7181 : !torch.vtensor<[],f32> -> !torch.float
%7184 = torch.aten.item %7182 : !torch.vtensor<[],si8> -> !torch.int
%7185 = torch.aten._make_per_tensor_quantized_tensor %7180, %7183, %7184 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7186 = torch.aten.dequantize.self %7185 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2388 = torch.constant.int 1
%int1_2389 = torch.constant.int 1
%int1_2390 = torch.constant.int 1
%int1_2391 = torch.constant.int 1
%int1_2392 = torch.constant.int 1
%int1_2393 = torch.constant.int 1
%int0_2394 = torch.constant.int 0
%7187 = torch.prim.ListConstruct %int1_2388, %int1_2389 : (!torch.int, !torch.int) -> !torch.list<int>
%7188 = torch.prim.ListConstruct %int1_2390, %int1_2391 : (!torch.int, !torch.int) -> !torch.list<int>
%7189 = torch.prim.ListConstruct %int1_2392, %int1_2393 : (!torch.int, !torch.int) -> !torch.list<int>
%7190 = torch.prim.ListConstruct %int0_2394, %int0_2394 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2395 = torch.constant.bool false
%int1_2396 = torch.constant.int 1
%7191 = torch.aten.convolution %7162, %7174, %7186, %7189, %7187, %7188, %false_2395, %7190, %int1_2396 : !torch.vtensor<[1,32,10,10],f32>, !torch.vtensor<[16,32,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,10,10],f32>
%7192 = torch.aten.relu %7191 : !torch.vtensor<[1,16,10,10],f32> -> !torch.vtensor<[1,16,10,10],f32>
%7193 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2397 = torch.constant.int 12
%7195 = torch.aten.item %7193 : !torch.vtensor<[],f32> -> !torch.float
%7196 = torch.aten.item %7194 : !torch.vtensor<[],si8> -> !torch.int
%7197 = torch.aten.quantize_per_tensor %7192, %7195, %7196, %int12_2397 : !torch.vtensor<[1,16,10,10],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7198 = torch.aten.int_repr %7197 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],si8>
%7199 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7201 = torch.aten.item %7199 : !torch.vtensor<[],f32> -> !torch.float
%7202 = torch.aten.item %7200 : !torch.vtensor<[],si8> -> !torch.int
%7203 = torch.aten._make_per_tensor_quantized_tensor %7198, %7201, %7202 : !torch.vtensor<[1,16,10,10],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,10,10],!torch.qint8>
%7204 = torch.aten.dequantize.self %7203 : !torch.vtensor<[1,16,10,10],!torch.qint8> -> !torch.vtensor<[1,16,10,10],f32>
%7205 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%7206 = torch.vtensor.literal(dense<20> : tensor<si64>) : !torch.vtensor<[],si64>
%7207 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2398 = torch.constant.int 0
%int0_2399 = torch.constant.int 0
%int0_2400 = torch.constant.int 0
%7208 = torch.aten.select.int %7207, %int0_2398, %int0_2400 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7209 = torch.aten.item %7208 : !torch.vtensor<[1],si64> -> !torch.int
%7210 = torch.aten.lt.int %7209, %int0_2398 : !torch.int, !torch.int -> !torch.bool
%7211 = torch.aten.Int.bool %7210 : !torch.bool -> !torch.int
%7212 = torch.aten.mul.int %7211, %int0_2399 : !torch.int, !torch.int -> !torch.int
%7213 = torch.aten.add.int %7209, %7212 : !torch.int, !torch.int -> !torch.int
%7214 = torch.prim.ListConstruct %7213 : (!torch.int) -> !torch.list<int>
%false_2401 = torch.constant.bool false
%none_2402 = torch.constant.none
%7215 = torch.aten.tensor %7214, %none_2402, %none_2402, %false_2401 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2403, %indices_2404 = torch.aten.sort %7215, %int0_2398, %false_2401 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2405 = torch.constant.int 0
%7216 = torch.aten.select.int %values_2403, %int0_2398, %int0_2405 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7217 = torch.aten.item %7216 : !torch.vtensor<[1],si64> -> !torch.int
%7218 = torch.aten.unsqueeze %7205, %7217 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%7219 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2406 = torch.constant.int 0
%int0_2407 = torch.constant.int 0
%int0_2408 = torch.constant.int 0
%7220 = torch.aten.select.int %7219, %int0_2406, %int0_2408 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7221 = torch.aten.item %7220 : !torch.vtensor<[1],si64> -> !torch.int
%7222 = torch.aten.lt.int %7221, %int0_2406 : !torch.int, !torch.int -> !torch.bool
%7223 = torch.aten.Int.bool %7222 : !torch.bool -> !torch.int
%7224 = torch.aten.mul.int %7223, %int0_2407 : !torch.int, !torch.int -> !torch.int
%7225 = torch.aten.add.int %7221, %7224 : !torch.int, !torch.int -> !torch.int
%7226 = torch.prim.ListConstruct %7225 : (!torch.int) -> !torch.list<int>
%false_2409 = torch.constant.bool false
%none_2410 = torch.constant.none
%7227 = torch.aten.tensor %7226, %none_2410, %none_2410, %false_2409 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2411, %indices_2412 = torch.aten.sort %7227, %int0_2406, %false_2409 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2413 = torch.constant.int 0
%7228 = torch.aten.select.int %values_2411, %int0_2406, %int0_2413 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7229 = torch.aten.item %7228 : !torch.vtensor<[1],si64> -> !torch.int
%7230 = torch.aten.unsqueeze %7206, %7229 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%7231 = torch.prim.ListConstruct %7218, %7230 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2414 = torch.constant.int 0
%7232 = torch.aten.cat %7231, %int0_2414 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%7233 = torch.aten._shape_as_tensor %7204 : !torch.vtensor<[1,16,10,10],f32> -> !torch.vtensor<[4],si64>
%7234 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%7235 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%7236 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2415 = torch.constant.none
%int1_2416 = torch.constant.int 1
%7237 = torch.prim.ListConstruct %int1_2416 : (!torch.int) -> !torch.list<int>
%7238 = torch.aten.ones %7237, %none_2415, %none_2415, %none_2415, %none_2415 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2417 = torch.constant.int 0
%int0_2418 = torch.constant.int 0
%7239 = torch.prim.NumToTensor.Scalar %int0_2418 : !torch.int -> !torch.vtensor<[1],si64>
%7240 = torch.aten.index_select %7235, %int0_2417, %7239 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7241 = torch.aten.item %7240 : !torch.vtensor<[1],si64> -> !torch.int
%7242 = torch.aten.index_select %7236, %int0_2417, %7239 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7243 = torch.aten.item %7242 : !torch.vtensor<[1],si64> -> !torch.int
%7244 = torch.aten.index_select %7234, %int0_2417, %7239 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7245 = torch.aten.item %7244 : !torch.vtensor<[1],si64> -> !torch.int
%7246 = torch.aten.index_select %7238, %int0_2417, %7239 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7247 = torch.aten.item %7246 : !torch.vtensor<[1],si64> -> !torch.int
%7248 = torch.aten.slice.Tensor %7233, %7245, %7241, %7243, %7247 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2419 = torch.constant.int 4
%none_2420 = torch.constant.none
%false_2421 = torch.constant.bool false
%7249 = torch.aten.to.dtype %7232, %int4_2419, %false_2421, %false_2421, %none_2420 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%7250 = torch.prim.ListConstruct %7248, %7249 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2422 = torch.constant.int 0
%7251 = torch.aten.cat %7250, %int0_2422 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%7252 = torch.operator "onnx.Resize"(%7204, %none, %none, %7251) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,16,10,10],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%7253 = torch.prim.ListConstruct %7252, %7059 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,16,20,20],f32>) -> !torch.list<vtensor>
%int1_2423 = torch.constant.int 1
%7254 = torch.aten.cat %7253, %int1_2423 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,20,20],f32>
%7255 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2424 = torch.constant.int 12
%7257 = torch.aten.item %7255 : !torch.vtensor<[],f32> -> !torch.float
%7258 = torch.aten.item %7256 : !torch.vtensor<[],si8> -> !torch.int
%7259 = torch.aten.quantize_per_tensor %7254, %7257, %7258, %int12_2424 : !torch.vtensor<[1,?,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%7260 = torch.aten.int_repr %7259 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],si8>
%7261 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7262 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7263 = torch.aten.item %7261 : !torch.vtensor<[],f32> -> !torch.float
%7264 = torch.aten.item %7262 : !torch.vtensor<[],si8> -> !torch.int
%7265 = torch.aten._make_per_tensor_quantized_tensor %7260, %7263, %7264 : !torch.vtensor<[1,?,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,20,20],!torch.qint8>
%7266 = torch.aten.dequantize.self %7265 : !torch.vtensor<[1,?,20,20],!torch.qint8> -> !torch.vtensor<[1,?,20,20],f32>
%7267 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7268 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2425 = torch.constant.int 12
%7269 = torch.aten.item %7267 : !torch.vtensor<[],f32> -> !torch.float
%7270 = torch.aten.item %7268 : !torch.vtensor<[],si8> -> !torch.int
%7271 = torch.aten.quantize_per_tensor %214, %7269, %7270, %int12_2425 : !torch.vtensor<[16,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7272 = torch.aten.int_repr %7271 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],si8>
%7273 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7274 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7275 = torch.aten.item %7273 : !torch.vtensor<[],f32> -> !torch.float
%7276 = torch.aten.item %7274 : !torch.vtensor<[],si8> -> !torch.int
%7277 = torch.aten._make_per_tensor_quantized_tensor %7272, %7275, %7276 : !torch.vtensor<[16,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7278 = torch.aten.dequantize.self %7277 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],f32>
%7279 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7280 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2426 = torch.constant.int 12
%7281 = torch.aten.item %7279 : !torch.vtensor<[],f32> -> !torch.float
%7282 = torch.aten.item %7280 : !torch.vtensor<[],si8> -> !torch.int
%7283 = torch.aten.quantize_per_tensor %215, %7281, %7282, %int12_2426 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7284 = torch.aten.int_repr %7283 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7285 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7286 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7287 = torch.aten.item %7285 : !torch.vtensor<[],f32> -> !torch.float
%7288 = torch.aten.item %7286 : !torch.vtensor<[],si8> -> !torch.int
%7289 = torch.aten._make_per_tensor_quantized_tensor %7284, %7287, %7288 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7290 = torch.aten.dequantize.self %7289 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2427 = torch.constant.int 1
%int1_2428 = torch.constant.int 1
%int1_2429 = torch.constant.int 1
%int1_2430 = torch.constant.int 1
%int1_2431 = torch.constant.int 1
%int1_2432 = torch.constant.int 1
%int0_2433 = torch.constant.int 0
%7291 = torch.prim.ListConstruct %int1_2427, %int1_2428 : (!torch.int, !torch.int) -> !torch.list<int>
%7292 = torch.prim.ListConstruct %int1_2429, %int1_2430 : (!torch.int, !torch.int) -> !torch.list<int>
%7293 = torch.prim.ListConstruct %int1_2431, %int1_2432 : (!torch.int, !torch.int) -> !torch.list<int>
%7294 = torch.prim.ListConstruct %int0_2433, %int0_2433 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2434 = torch.constant.bool false
%int1_2435 = torch.constant.int 1
%7295 = torch.aten.convolution %7266, %7278, %7290, %7293, %7291, %7292, %false_2434, %7294, %int1_2435 : !torch.vtensor<[1,?,20,20],f32>, !torch.vtensor<[16,32,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,20,20],f32>
%7296 = torch.aten.relu %7295 : !torch.vtensor<[1,16,20,20],f32> -> !torch.vtensor<[1,16,20,20],f32>
%7297 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7298 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2436 = torch.constant.int 12
%7299 = torch.aten.item %7297 : !torch.vtensor<[],f32> -> !torch.float
%7300 = torch.aten.item %7298 : !torch.vtensor<[],si8> -> !torch.int
%7301 = torch.aten.quantize_per_tensor %7296, %7299, %7300, %int12_2436 : !torch.vtensor<[1,16,20,20],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7302 = torch.aten.int_repr %7301 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],si8>
%7303 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7305 = torch.aten.item %7303 : !torch.vtensor<[],f32> -> !torch.float
%7306 = torch.aten.item %7304 : !torch.vtensor<[],si8> -> !torch.int
%7307 = torch.aten._make_per_tensor_quantized_tensor %7302, %7305, %7306 : !torch.vtensor<[1,16,20,20],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,20,20],!torch.qint8>
%7308 = torch.aten.dequantize.self %7307 : !torch.vtensor<[1,16,20,20],!torch.qint8> -> !torch.vtensor<[1,16,20,20],f32>
%7309 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%7310 = torch.vtensor.literal(dense<40> : tensor<si64>) : !torch.vtensor<[],si64>
%7311 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2437 = torch.constant.int 0
%int0_2438 = torch.constant.int 0
%int0_2439 = torch.constant.int 0
%7312 = torch.aten.select.int %7311, %int0_2437, %int0_2439 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7313 = torch.aten.item %7312 : !torch.vtensor<[1],si64> -> !torch.int
%7314 = torch.aten.lt.int %7313, %int0_2437 : !torch.int, !torch.int -> !torch.bool
%7315 = torch.aten.Int.bool %7314 : !torch.bool -> !torch.int
%7316 = torch.aten.mul.int %7315, %int0_2438 : !torch.int, !torch.int -> !torch.int
%7317 = torch.aten.add.int %7313, %7316 : !torch.int, !torch.int -> !torch.int
%7318 = torch.prim.ListConstruct %7317 : (!torch.int) -> !torch.list<int>
%false_2440 = torch.constant.bool false
%none_2441 = torch.constant.none
%7319 = torch.aten.tensor %7318, %none_2441, %none_2441, %false_2440 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2442, %indices_2443 = torch.aten.sort %7319, %int0_2437, %false_2440 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2444 = torch.constant.int 0
%7320 = torch.aten.select.int %values_2442, %int0_2437, %int0_2444 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7321 = torch.aten.item %7320 : !torch.vtensor<[1],si64> -> !torch.int
%7322 = torch.aten.unsqueeze %7309, %7321 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%7323 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_2445 = torch.constant.int 0
%int0_2446 = torch.constant.int 0
%int0_2447 = torch.constant.int 0
%7324 = torch.aten.select.int %7323, %int0_2445, %int0_2447 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7325 = torch.aten.item %7324 : !torch.vtensor<[1],si64> -> !torch.int
%7326 = torch.aten.lt.int %7325, %int0_2445 : !torch.int, !torch.int -> !torch.bool
%7327 = torch.aten.Int.bool %7326 : !torch.bool -> !torch.int
%7328 = torch.aten.mul.int %7327, %int0_2446 : !torch.int, !torch.int -> !torch.int
%7329 = torch.aten.add.int %7325, %7328 : !torch.int, !torch.int -> !torch.int
%7330 = torch.prim.ListConstruct %7329 : (!torch.int) -> !torch.list<int>
%false_2448 = torch.constant.bool false
%none_2449 = torch.constant.none
%7331 = torch.aten.tensor %7330, %none_2449, %none_2449, %false_2448 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_2450, %indices_2451 = torch.aten.sort %7331, %int0_2445, %false_2448 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_2452 = torch.constant.int 0
%7332 = torch.aten.select.int %values_2450, %int0_2445, %int0_2452 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%7333 = torch.aten.item %7332 : !torch.vtensor<[1],si64> -> !torch.int
%7334 = torch.aten.unsqueeze %7310, %7333 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%7335 = torch.prim.ListConstruct %7322, %7334 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_2453 = torch.constant.int 0
%7336 = torch.aten.cat %7335, %int0_2453 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%7337 = torch.aten._shape_as_tensor %7308 : !torch.vtensor<[1,16,20,20],f32> -> !torch.vtensor<[4],si64>
%7338 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%7339 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%7340 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_2454 = torch.constant.none
%int1_2455 = torch.constant.int 1
%7341 = torch.prim.ListConstruct %int1_2455 : (!torch.int) -> !torch.list<int>
%7342 = torch.aten.ones %7341, %none_2454, %none_2454, %none_2454, %none_2454 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_2456 = torch.constant.int 0
%int0_2457 = torch.constant.int 0
%7343 = torch.prim.NumToTensor.Scalar %int0_2457 : !torch.int -> !torch.vtensor<[1],si64>
%7344 = torch.aten.index_select %7339, %int0_2456, %7343 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7345 = torch.aten.item %7344 : !torch.vtensor<[1],si64> -> !torch.int
%7346 = torch.aten.index_select %7340, %int0_2456, %7343 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7347 = torch.aten.item %7346 : !torch.vtensor<[1],si64> -> !torch.int
%7348 = torch.aten.index_select %7338, %int0_2456, %7343 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7349 = torch.aten.item %7348 : !torch.vtensor<[1],si64> -> !torch.int
%7350 = torch.aten.index_select %7342, %int0_2456, %7343 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%7351 = torch.aten.item %7350 : !torch.vtensor<[1],si64> -> !torch.int
%7352 = torch.aten.slice.Tensor %7337, %7349, %7345, %7347, %7351 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_2458 = torch.constant.int 4
%none_2459 = torch.constant.none
%false_2460 = torch.constant.bool false
%7353 = torch.aten.to.dtype %7336, %int4_2458, %false_2460, %false_2460, %none_2459 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%7354 = torch.prim.ListConstruct %7352, %7353 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_2461 = torch.constant.int 0
%7355 = torch.aten.cat %7354, %int0_2461 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%7356 = torch.operator "onnx.Resize"(%7308, %none, %none, %7355) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,16,20,20],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%7357 = torch.prim.ListConstruct %7356, %7000 : (!torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[1,16,40,40],f32>) -> !torch.list<vtensor>
%int1_2462 = torch.constant.int 1
%7358 = torch.aten.cat %7357, %int1_2462 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,?,40,40],f32>
%7359 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2463 = torch.constant.int 12
%7361 = torch.aten.item %7359 : !torch.vtensor<[],f32> -> !torch.float
%7362 = torch.aten.item %7360 : !torch.vtensor<[],si8> -> !torch.int
%7363 = torch.aten.quantize_per_tensor %7358, %7361, %7362, %int12_2463 : !torch.vtensor<[1,?,40,40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%7364 = torch.aten.int_repr %7363 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],si8>
%7365 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7367 = torch.aten.item %7365 : !torch.vtensor<[],f32> -> !torch.float
%7368 = torch.aten.item %7366 : !torch.vtensor<[],si8> -> !torch.int
%7369 = torch.aten._make_per_tensor_quantized_tensor %7364, %7367, %7368 : !torch.vtensor<[1,?,40,40],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,?,40,40],!torch.qint8>
%7370 = torch.aten.dequantize.self %7369 : !torch.vtensor<[1,?,40,40],!torch.qint8> -> !torch.vtensor<[1,?,40,40],f32>
%7371 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7372 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2464 = torch.constant.int 12
%7373 = torch.aten.item %7371 : !torch.vtensor<[],f32> -> !torch.float
%7374 = torch.aten.item %7372 : !torch.vtensor<[],si8> -> !torch.int
%7375 = torch.aten.quantize_per_tensor %216, %7373, %7374, %int12_2464 : !torch.vtensor<[16,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7376 = torch.aten.int_repr %7375 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],si8>
%7377 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%7378 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7379 = torch.aten.item %7377 : !torch.vtensor<[],f32> -> !torch.float
%7380 = torch.aten.item %7378 : !torch.vtensor<[],si8> -> !torch.int
%7381 = torch.aten._make_per_tensor_quantized_tensor %7376, %7379, %7380 : !torch.vtensor<[16,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,32,3,3],!torch.qint8>
%7382 = torch.aten.dequantize.self %7381 : !torch.vtensor<[16,32,3,3],!torch.qint8> -> !torch.vtensor<[16,32,3,3],f32>
%7383 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7384 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_2465 = torch.constant.int 12
%7385 = torch.aten.item %7383 : !torch.vtensor<[],f32> -> !torch.float
%7386 = torch.aten.item %7384 : !torch.vtensor<[],si8> -> !torch.int
%7387 = torch.aten.quantize_per_tensor %217, %7385, %7386, %int12_2465 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7388 = torch.aten.int_repr %7387 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%7389 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%7390 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%7391 = torch.aten.item %7389 : !torch.vtensor<[],f32> -> !torch.float
%7392 = torch.aten.item %7390 : !torch.vtensor<[],si8> -> !torch.int
%7393 = torch.aten._make_per_tensor_quantized_tensor %7388, %7391, %7392 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%7394 = torch.aten.dequantize.self %7393 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_2466 = torch.constant.int 1
%int1_2467 = torch.constant.int 1
%int1_2468 = torch.constant.int 1
%int1_2469 = torch.constant.int 1
%int1_2470 = torch.constant.int 1
%int1_2471 = torch.constant.int 1
%int0_2472 = torch.constant.int 0
%7395 = torch.prim.ListConstruct %int1_2466, %int1_2467 : (!torch.int, !torch.int) -> !torch.list<int>
%7396 = torch.prim.ListConstruct %int1_2468, %int1_2469 : (!torch.int, !torch.int) -> !torch.list<int>
%7397 = torch.prim.ListConstruct %int1_2470, %int1_2471 : (!torch.int, !torch.int) -> !torch.list<int>
%7398 = torch.prim.ListConstruct %int0_2472, %int0_2472 : (!torch.int, !torch.int) -> !torch.list<int>
%false_2473 = torch.constant.bool false
%int1_2474 = torch.constant.int 1
%7399 = torch.aten.convolution %7370, %7382, %7394, %7397, %7395, %7396, %false_2473, %7398, %int1_2474 : !torch.vtensor<[1,?,40,40],f32>, !torch.vtensor<[16,32,3,3],f32>, !torch.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment