Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save AmosLewis/cb9d7a443e66997a57e2297cf571293a to your computer and use it in GitHub Desktop.
module {
func.func @torch_jit(%arg0: !torch.vtensor<[32,3,256,256],f32>) -> !torch.vtensor<[32,1,256,256],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} {
%float1.250000e-01 = torch.constant.float 1.250000e-01
%float4.882810e-04 = torch.constant.float 4.8828125E-4
%float3.125000e-02 = torch.constant.float 3.125000e-02
%float1.953130e-03 = torch.constant.float 0.001953125
%float3.906250e-03 = torch.constant.float 3.906250e-03
%float9.765620e-04 = torch.constant.float 9.765625E-4
%float7.812500e-03 = torch.constant.float 7.812500e-03
%true = torch.constant.bool true
%int2 = torch.constant.int 2
%false = torch.constant.bool false
%int0 = torch.constant.int 0
%int1 = torch.constant.int 1
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x3x3x3xf32>) : !torch.vtensor<[32,3,3,3],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x32x3x3xf32>) : !torch.vtensor<[64,32,3,3],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x64x3x3xf32>) : !torch.vtensor<[128,64,3,3],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x128x3x3xf32>) : !torch.vtensor<[256,128,3,3],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x3x3xf32>) : !torch.vtensor<[512,256,3,3],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x2x2xf32>) : !torch.vtensor<[512,256,2,2],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x128x2x2xf32>) : !torch.vtensor<[256,128,2,2],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x64x2x2xf32>) : !torch.vtensor<[128,64,2,2],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x128x3x3xf32>) : !torch.vtensor<[64,128,3,3],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x32x2x2xf32>) : !torch.vtensor<[64,32,2,2],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x32x1x1xf32>) : !torch.vtensor<[1,32,1,1],f32>
%45 = torch.vtensor.literal(dense<-0.3984375> : tensor<1xf32>) : !torch.vtensor<[1],f32>
%int12 = torch.constant.int 12
%float1.562500e-02 = torch.constant.float 1.562500e-02
%46 = torch.aten.quantize_per_tensor %arg0, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,3,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,3,256,256],!torch.qint8>
%47 = torch.aten.int_repr %46 : !torch.vtensor<[32,3,256,256],!torch.qint8> -> !torch.vtensor<[32,3,256,256],si8>
%48 = torch.aten._make_per_tensor_quantized_tensor %47, %float1.562500e-02, %int0 : !torch.vtensor<[32,3,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,3,256,256],!torch.qint8>
%49 = torch.aten.dequantize.self %48 : !torch.vtensor<[32,3,256,256],!torch.qint8> -> !torch.vtensor<[32,3,256,256],f32>
%50 = torch.aten.quantize_per_tensor %0, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[32,3,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,3,3,3],!torch.qint8>
%51 = torch.aten.int_repr %50 : !torch.vtensor<[32,3,3,3],!torch.qint8> -> !torch.vtensor<[32,3,3,3],si8>
%52 = torch.aten._make_per_tensor_quantized_tensor %51, %float7.812500e-03, %int0 : !torch.vtensor<[32,3,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,3,3,3],!torch.qint8>
%53 = torch.aten.dequantize.self %52 : !torch.vtensor<[32,3,3,3],!torch.qint8> -> !torch.vtensor<[32,3,3,3],f32>
%54 = torch.aten.quantize_per_tensor %1, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%55 = torch.aten.int_repr %54 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%56 = torch.aten._make_per_tensor_quantized_tensor %55, %float9.765620e-04, %int0 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%57 = torch.aten.dequantize.self %56 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%58 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%59 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%60 = torch.aten.convolution %49, %53, %57, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,3,256,256],f32>, !torch.vtensor<[32,3,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,256,256],f32>
%61 = torch.aten.relu %60 : !torch.vtensor<[32,32,256,256],f32> -> !torch.vtensor<[32,32,256,256],f32>
%62 = torch.aten.quantize_per_tensor %61, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[32,32,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%63 = torch.aten.int_repr %62 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],si8>
%64 = torch.aten._make_per_tensor_quantized_tensor %63, %float7.812500e-03, %int0 : !torch.vtensor<[32,32,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%65 = torch.aten.dequantize.self %64 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],f32>
%66 = torch.aten.quantize_per_tensor %2, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%67 = torch.aten.int_repr %66 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%68 = torch.aten._make_per_tensor_quantized_tensor %67, %float3.906250e-03, %int0 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%69 = torch.aten.dequantize.self %68 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%70 = torch.aten.quantize_per_tensor %3, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%71 = torch.aten.int_repr %70 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%72 = torch.aten._make_per_tensor_quantized_tensor %71, %float7.812500e-03, %int0 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%73 = torch.aten.dequantize.self %72 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%74 = torch.aten.convolution %65, %69, %73, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,32,256,256],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,256,256],f32>
%75 = torch.aten.relu %74 : !torch.vtensor<[32,32,256,256],f32> -> !torch.vtensor<[32,32,256,256],f32>
%76 = torch.aten.quantize_per_tensor %75, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,32,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%77 = torch.aten.int_repr %76 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],si8>
%78 = torch.aten._make_per_tensor_quantized_tensor %77, %float1.562500e-02, %int0 : !torch.vtensor<[32,32,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%79 = torch.aten.dequantize.self %78 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],f32>
%80 = torch.prim.ListConstruct %int2, %int2 : (!torch.int, !torch.int) -> !torch.list<int>
%81 = torch.aten.max_pool2d %79, %80, %80, %59, %58, %false : !torch.vtensor<[32,32,256,256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,32,128,128],f32>
%82 = torch.aten.quantize_per_tensor %81, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,32,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,128,128],!torch.qint8>
%83 = torch.aten.int_repr %82 : !torch.vtensor<[32,32,128,128],!torch.qint8> -> !torch.vtensor<[32,32,128,128],si8>
%84 = torch.aten._make_per_tensor_quantized_tensor %83, %float1.562500e-02, %int0 : !torch.vtensor<[32,32,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,128,128],!torch.qint8>
%85 = torch.aten.dequantize.self %84 : !torch.vtensor<[32,32,128,128],!torch.qint8> -> !torch.vtensor<[32,32,128,128],f32>
%86 = torch.aten.quantize_per_tensor %4, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[64,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,32,3,3],!torch.qint8>
%87 = torch.aten.int_repr %86 : !torch.vtensor<[64,32,3,3],!torch.qint8> -> !torch.vtensor<[64,32,3,3],si8>
%88 = torch.aten._make_per_tensor_quantized_tensor %87, %float3.906250e-03, %int0 : !torch.vtensor<[64,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,32,3,3],!torch.qint8>
%89 = torch.aten.dequantize.self %88 : !torch.vtensor<[64,32,3,3],!torch.qint8> -> !torch.vtensor<[64,32,3,3],f32>
%90 = torch.aten.quantize_per_tensor %5, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%91 = torch.aten.int_repr %90 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%92 = torch.aten._make_per_tensor_quantized_tensor %91, %float7.812500e-03, %int0 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%93 = torch.aten.dequantize.self %92 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%94 = torch.aten.convolution %85, %89, %93, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,32,128,128],f32>, !torch.vtensor<[64,32,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,128,128],f32>
%95 = torch.aten.relu %94 : !torch.vtensor<[32,64,128,128],f32> -> !torch.vtensor<[32,64,128,128],f32>
%96 = torch.aten.quantize_per_tensor %95, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%97 = torch.aten.int_repr %96 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],si8>
%98 = torch.aten._make_per_tensor_quantized_tensor %97, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%99 = torch.aten.dequantize.self %98 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],f32>
%100 = torch.aten.quantize_per_tensor %6, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%101 = torch.aten.int_repr %100 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%102 = torch.aten._make_per_tensor_quantized_tensor %101, %float3.906250e-03, %int0 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%103 = torch.aten.dequantize.self %102 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%104 = torch.aten.quantize_per_tensor %7, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%105 = torch.aten.int_repr %104 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%106 = torch.aten._make_per_tensor_quantized_tensor %105, %float7.812500e-03, %int0 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%107 = torch.aten.dequantize.self %106 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%108 = torch.aten.convolution %99, %103, %107, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,64,128,128],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,128,128],f32>
%109 = torch.aten.relu %108 : !torch.vtensor<[32,64,128,128],f32> -> !torch.vtensor<[32,64,128,128],f32>
%110 = torch.aten.quantize_per_tensor %109, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%111 = torch.aten.int_repr %110 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],si8>
%112 = torch.aten._make_per_tensor_quantized_tensor %111, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%113 = torch.aten.dequantize.self %112 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],f32>
%114 = torch.aten.max_pool2d %113, %80, %80, %59, %58, %false : !torch.vtensor<[32,64,128,128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,64,64,64],f32>
%115 = torch.aten.quantize_per_tensor %114, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,64,64],!torch.qint8>
%116 = torch.aten.int_repr %115 : !torch.vtensor<[32,64,64,64],!torch.qint8> -> !torch.vtensor<[32,64,64,64],si8>
%117 = torch.aten._make_per_tensor_quantized_tensor %116, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,64,64],!torch.qint8>
%118 = torch.aten.dequantize.self %117 : !torch.vtensor<[32,64,64,64],!torch.qint8> -> !torch.vtensor<[32,64,64,64],f32>
%119 = torch.aten.quantize_per_tensor %8, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[128,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%120 = torch.aten.int_repr %119 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],si8>
%121 = torch.aten._make_per_tensor_quantized_tensor %120, %float1.953130e-03, %int0 : !torch.vtensor<[128,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,64,3,3],!torch.qint8>
%122 = torch.aten.dequantize.self %121 : !torch.vtensor<[128,64,3,3],!torch.qint8> -> !torch.vtensor<[128,64,3,3],f32>
%123 = torch.aten.quantize_per_tensor %9, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%124 = torch.aten.int_repr %123 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%125 = torch.aten._make_per_tensor_quantized_tensor %124, %float7.812500e-03, %int0 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%126 = torch.aten.dequantize.self %125 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%127 = torch.aten.convolution %118, %122, %126, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,64,64,64],f32>, !torch.vtensor<[128,64,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,64,64],f32>
%128 = torch.aten.relu %127 : !torch.vtensor<[32,128,64,64],f32> -> !torch.vtensor<[32,128,64,64],f32>
%129 = torch.aten.quantize_per_tensor %128, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,128,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%130 = torch.aten.int_repr %129 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],si8>
%131 = torch.aten._make_per_tensor_quantized_tensor %130, %float1.562500e-02, %int0 : !torch.vtensor<[32,128,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%132 = torch.aten.dequantize.self %131 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],f32>
%133 = torch.aten.quantize_per_tensor %10, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%134 = torch.aten.int_repr %133 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%135 = torch.aten._make_per_tensor_quantized_tensor %134, %float1.953130e-03, %int0 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%136 = torch.aten.dequantize.self %135 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%137 = torch.aten.quantize_per_tensor %11, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%138 = torch.aten.int_repr %137 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%139 = torch.aten._make_per_tensor_quantized_tensor %138, %float1.562500e-02, %int0 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%140 = torch.aten.dequantize.self %139 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%141 = torch.aten.convolution %132, %136, %140, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,128,64,64],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,64,64],f32>
%142 = torch.aten.relu %141 : !torch.vtensor<[32,128,64,64],f32> -> !torch.vtensor<[32,128,64,64],f32>
%143 = torch.aten.quantize_per_tensor %142, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,128,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%144 = torch.aten.int_repr %143 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],si8>
%145 = torch.aten._make_per_tensor_quantized_tensor %144, %float3.125000e-02, %int0 : !torch.vtensor<[32,128,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%146 = torch.aten.dequantize.self %145 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],f32>
%147 = torch.aten.max_pool2d %146, %80, %80, %59, %58, %false : !torch.vtensor<[32,128,64,64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,128,32,32],f32>
%148 = torch.aten.quantize_per_tensor %147, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,128,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,32,32],!torch.qint8>
%149 = torch.aten.int_repr %148 : !torch.vtensor<[32,128,32,32],!torch.qint8> -> !torch.vtensor<[32,128,32,32],si8>
%150 = torch.aten._make_per_tensor_quantized_tensor %149, %float3.125000e-02, %int0 : !torch.vtensor<[32,128,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,32,32],!torch.qint8>
%151 = torch.aten.dequantize.self %150 : !torch.vtensor<[32,128,32,32],!torch.qint8> -> !torch.vtensor<[32,128,32,32],f32>
%152 = torch.aten.quantize_per_tensor %12, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[256,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%153 = torch.aten.int_repr %152 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],si8>
%154 = torch.aten._make_per_tensor_quantized_tensor %153, %float9.765620e-04, %int0 : !torch.vtensor<[256,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,128,3,3],!torch.qint8>
%155 = torch.aten.dequantize.self %154 : !torch.vtensor<[256,128,3,3],!torch.qint8> -> !torch.vtensor<[256,128,3,3],f32>
%156 = torch.aten.quantize_per_tensor %13, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%157 = torch.aten.int_repr %156 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%158 = torch.aten._make_per_tensor_quantized_tensor %157, %float7.812500e-03, %int0 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%159 = torch.aten.dequantize.self %158 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%160 = torch.aten.convolution %151, %155, %159, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,128,32,32],f32>, !torch.vtensor<[256,128,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>
%161 = torch.aten.relu %160 : !torch.vtensor<[32,256,32,32],f32> -> !torch.vtensor<[32,256,32,32],f32>
%162 = torch.aten.quantize_per_tensor %161, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,256,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%163 = torch.aten.int_repr %162 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],si8>
%164 = torch.aten._make_per_tensor_quantized_tensor %163, %float3.125000e-02, %int0 : !torch.vtensor<[32,256,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%165 = torch.aten.dequantize.self %164 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],f32>
%166 = torch.aten.quantize_per_tensor %14, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%167 = torch.aten.int_repr %166 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%168 = torch.aten._make_per_tensor_quantized_tensor %167, %float1.953130e-03, %int0 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%169 = torch.aten.dequantize.self %168 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%170 = torch.aten.quantize_per_tensor %15, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%171 = torch.aten.int_repr %170 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%172 = torch.aten._make_per_tensor_quantized_tensor %171, %float1.562500e-02, %int0 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%173 = torch.aten.dequantize.self %172 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%174 = torch.aten.convolution %165, %169, %173, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,256,32,32],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>
%175 = torch.aten.relu %174 : !torch.vtensor<[32,256,32,32],f32> -> !torch.vtensor<[32,256,32,32],f32>
%176 = torch.aten.quantize_per_tensor %175, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,256,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%177 = torch.aten.int_repr %176 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],si8>
%178 = torch.aten._make_per_tensor_quantized_tensor %177, %float1.562500e-02, %int0 : !torch.vtensor<[32,256,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%179 = torch.aten.dequantize.self %178 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],f32>
%180 = torch.aten.max_pool2d %179, %80, %80, %59, %58, %false : !torch.vtensor<[32,256,32,32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,256,16,16],f32>
%181 = torch.aten.quantize_per_tensor %180, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,256,16,16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,16,16],!torch.qint8>
%182 = torch.aten.int_repr %181 : !torch.vtensor<[32,256,16,16],!torch.qint8> -> !torch.vtensor<[32,256,16,16],si8>
%183 = torch.aten._make_per_tensor_quantized_tensor %182, %float1.562500e-02, %int0 : !torch.vtensor<[32,256,16,16],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,16,16],!torch.qint8>
%184 = torch.aten.dequantize.self %183 : !torch.vtensor<[32,256,16,16],!torch.qint8> -> !torch.vtensor<[32,256,16,16],f32>
%185 = torch.aten.quantize_per_tensor %16, %float4.882810e-04, %int0, %int12 : !torch.vtensor<[512,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%186 = torch.aten.int_repr %185 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],si8>
%187 = torch.aten._make_per_tensor_quantized_tensor %186, %float4.882810e-04, %int0 : !torch.vtensor<[512,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,3,3],!torch.qint8>
%188 = torch.aten.dequantize.self %187 : !torch.vtensor<[512,256,3,3],!torch.qint8> -> !torch.vtensor<[512,256,3,3],f32>
%189 = torch.aten.quantize_per_tensor %17, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%190 = torch.aten.int_repr %189 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%191 = torch.aten._make_per_tensor_quantized_tensor %190, %float7.812500e-03, %int0 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%192 = torch.aten.dequantize.self %191 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%193 = torch.aten.convolution %184, %188, %192, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,256,16,16],f32>, !torch.vtensor<[512,256,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,512,16,16],f32>
%194 = torch.aten.relu %193 : !torch.vtensor<[32,512,16,16],f32> -> !torch.vtensor<[32,512,16,16],f32>
%195 = torch.aten.quantize_per_tensor %194, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,512,16,16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,16,16],!torch.qint8>
%196 = torch.aten.int_repr %195 : !torch.vtensor<[32,512,16,16],!torch.qint8> -> !torch.vtensor<[32,512,16,16],si8>
%197 = torch.aten._make_per_tensor_quantized_tensor %196, %float3.125000e-02, %int0 : !torch.vtensor<[32,512,16,16],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,16,16],!torch.qint8>
%198 = torch.aten.dequantize.self %197 : !torch.vtensor<[32,512,16,16],!torch.qint8> -> !torch.vtensor<[32,512,16,16],f32>
%199 = torch.aten.quantize_per_tensor %18, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%200 = torch.aten.int_repr %199 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%201 = torch.aten._make_per_tensor_quantized_tensor %200, %float9.765620e-04, %int0 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%202 = torch.aten.dequantize.self %201 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%203 = torch.aten.quantize_per_tensor %19, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%204 = torch.aten.int_repr %203 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%205 = torch.aten._make_per_tensor_quantized_tensor %204, %float7.812500e-03, %int0 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%206 = torch.aten.dequantize.self %205 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%207 = torch.aten.convolution %198, %202, %206, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,512,16,16],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,512,16,16],f32>
%208 = torch.aten.relu %207 : !torch.vtensor<[32,512,16,16],f32> -> !torch.vtensor<[32,512,16,16],f32>
%209 = torch.aten.quantize_per_tensor %208, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,512,16,16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,16,16],!torch.qint8>
%210 = torch.aten.int_repr %209 : !torch.vtensor<[32,512,16,16],!torch.qint8> -> !torch.vtensor<[32,512,16,16],si8>
%211 = torch.aten._make_per_tensor_quantized_tensor %210, %float1.562500e-02, %int0 : !torch.vtensor<[32,512,16,16],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,16,16],!torch.qint8>
%212 = torch.aten.dequantize.self %211 : !torch.vtensor<[32,512,16,16],!torch.qint8> -> !torch.vtensor<[32,512,16,16],f32>
%213 = torch.aten.quantize_per_tensor %20, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[512,256,2,2],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,2,2],!torch.qint8>
%214 = torch.aten.int_repr %213 : !torch.vtensor<[512,256,2,2],!torch.qint8> -> !torch.vtensor<[512,256,2,2],si8>
%215 = torch.aten._make_per_tensor_quantized_tensor %214, %float1.953130e-03, %int0 : !torch.vtensor<[512,256,2,2],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,2,2],!torch.qint8>
%216 = torch.aten.dequantize.self %215 : !torch.vtensor<[512,256,2,2],!torch.qint8> -> !torch.vtensor<[512,256,2,2],f32>
%217 = torch.aten.quantize_per_tensor %21, %float4.882810e-04, %int0, %int12 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%218 = torch.aten.int_repr %217 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%219 = torch.aten._make_per_tensor_quantized_tensor %218, %float4.882810e-04, %int0 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%220 = torch.aten.dequantize.self %219 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%221 = torch.aten.convolution %212, %216, %220, %80, %59, %58, %true, %59, %int1 : !torch.vtensor<[32,512,16,16],f32>, !torch.vtensor<[512,256,2,2],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>
%222 = torch.prim.ListConstruct %221, %179 : (!torch.vtensor<[32,256,32,32],f32>, !torch.vtensor<[32,256,32,32],f32>) -> !torch.list<vtensor>
%223 = torch.aten.cat %222, %int1 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,512,32,32],f32>
%224 = torch.aten.quantize_per_tensor %223, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,512,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,32,32],!torch.qint8>
%225 = torch.aten.int_repr %224 : !torch.vtensor<[32,512,32,32],!torch.qint8> -> !torch.vtensor<[32,512,32,32],si8>
%226 = torch.aten._make_per_tensor_quantized_tensor %225, %float1.562500e-02, %int0 : !torch.vtensor<[32,512,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,32,32],!torch.qint8>
%227 = torch.aten.dequantize.self %226 : !torch.vtensor<[32,512,32,32],!torch.qint8> -> !torch.vtensor<[32,512,32,32],f32>
%228 = torch.aten.quantize_per_tensor %22, %float4.882810e-04, %int0, %int12 : !torch.vtensor<[256,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%229 = torch.aten.int_repr %228 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],si8>
%230 = torch.aten._make_per_tensor_quantized_tensor %229, %float4.882810e-04, %int0 : !torch.vtensor<[256,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,3],!torch.qint8>
%231 = torch.aten.dequantize.self %230 : !torch.vtensor<[256,512,3,3],!torch.qint8> -> !torch.vtensor<[256,512,3,3],f32>
%232 = torch.aten.quantize_per_tensor %23, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%233 = torch.aten.int_repr %232 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%234 = torch.aten._make_per_tensor_quantized_tensor %233, %float1.562500e-02, %int0 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%235 = torch.aten.dequantize.self %234 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%236 = torch.aten.convolution %227, %231, %235, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,512,32,32],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>
%237 = torch.aten.relu %236 : !torch.vtensor<[32,256,32,32],f32> -> !torch.vtensor<[32,256,32,32],f32>
%238 = torch.aten.quantize_per_tensor %237, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,256,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%239 = torch.aten.int_repr %238 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],si8>
%240 = torch.aten._make_per_tensor_quantized_tensor %239, %float1.562500e-02, %int0 : !torch.vtensor<[32,256,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%241 = torch.aten.dequantize.self %240 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],f32>
%242 = torch.aten.quantize_per_tensor %24, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%243 = torch.aten.int_repr %242 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%244 = torch.aten._make_per_tensor_quantized_tensor %243, %float9.765620e-04, %int0 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%245 = torch.aten.dequantize.self %244 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%246 = torch.aten.quantize_per_tensor %25, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%247 = torch.aten.int_repr %246 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%248 = torch.aten._make_per_tensor_quantized_tensor %247, %float7.812500e-03, %int0 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%249 = torch.aten.dequantize.self %248 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%250 = torch.aten.convolution %241, %245, %249, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,256,32,32],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>
%251 = torch.aten.relu %250 : !torch.vtensor<[32,256,32,32],f32> -> !torch.vtensor<[32,256,32,32],f32>
%252 = torch.aten.quantize_per_tensor %251, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,256,32,32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%253 = torch.aten.int_repr %252 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],si8>
%254 = torch.aten._make_per_tensor_quantized_tensor %253, %float1.562500e-02, %int0 : !torch.vtensor<[32,256,32,32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,32,32],!torch.qint8>
%255 = torch.aten.dequantize.self %254 : !torch.vtensor<[32,256,32,32],!torch.qint8> -> !torch.vtensor<[32,256,32,32],f32>
%256 = torch.aten.quantize_per_tensor %26, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[256,128,2,2],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,128,2,2],!torch.qint8>
%257 = torch.aten.int_repr %256 : !torch.vtensor<[256,128,2,2],!torch.qint8> -> !torch.vtensor<[256,128,2,2],si8>
%258 = torch.aten._make_per_tensor_quantized_tensor %257, %float1.953130e-03, %int0 : !torch.vtensor<[256,128,2,2],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,128,2,2],!torch.qint8>
%259 = torch.aten.dequantize.self %258 : !torch.vtensor<[256,128,2,2],!torch.qint8> -> !torch.vtensor<[256,128,2,2],f32>
%260 = torch.aten.quantize_per_tensor %27, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%261 = torch.aten.int_repr %260 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%262 = torch.aten._make_per_tensor_quantized_tensor %261, %float9.765620e-04, %int0 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%263 = torch.aten.dequantize.self %262 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%264 = torch.aten.convolution %255, %259, %263, %80, %59, %58, %true, %59, %int1 : !torch.vtensor<[32,256,32,32],f32>, !torch.vtensor<[256,128,2,2],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,64,64],f32>
%265 = torch.prim.ListConstruct %264, %146 : (!torch.vtensor<[32,128,64,64],f32>, !torch.vtensor<[32,128,64,64],f32>) -> !torch.list<vtensor>
%266 = torch.aten.cat %265, %int1 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,256,64,64],f32>
%267 = torch.aten.quantize_per_tensor %266, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,256,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,64,64],!torch.qint8>
%268 = torch.aten.int_repr %267 : !torch.vtensor<[32,256,64,64],!torch.qint8> -> !torch.vtensor<[32,256,64,64],si8>
%269 = torch.aten._make_per_tensor_quantized_tensor %268, %float3.125000e-02, %int0 : !torch.vtensor<[32,256,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,64,64],!torch.qint8>
%270 = torch.aten.dequantize.self %269 : !torch.vtensor<[32,256,64,64],!torch.qint8> -> !torch.vtensor<[32,256,64,64],f32>
%271 = torch.aten.quantize_per_tensor %28, %float9.765620e-04, %int0, %int12 : !torch.vtensor<[128,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%272 = torch.aten.int_repr %271 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],si8>
%273 = torch.aten._make_per_tensor_quantized_tensor %272, %float9.765620e-04, %int0 : !torch.vtensor<[128,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,3,3],!torch.qint8>
%274 = torch.aten.dequantize.self %273 : !torch.vtensor<[128,256,3,3],!torch.qint8> -> !torch.vtensor<[128,256,3,3],f32>
%275 = torch.aten.quantize_per_tensor %29, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%276 = torch.aten.int_repr %275 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%277 = torch.aten._make_per_tensor_quantized_tensor %276, %float7.812500e-03, %int0 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%278 = torch.aten.dequantize.self %277 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%279 = torch.aten.convolution %270, %274, %278, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,256,64,64],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,64,64],f32>
%280 = torch.aten.relu %279 : !torch.vtensor<[32,128,64,64],f32> -> !torch.vtensor<[32,128,64,64],f32>
%281 = torch.aten.quantize_per_tensor %280, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,128,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%282 = torch.aten.int_repr %281 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],si8>
%283 = torch.aten._make_per_tensor_quantized_tensor %282, %float1.562500e-02, %int0 : !torch.vtensor<[32,128,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%284 = torch.aten.dequantize.self %283 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],f32>
%285 = torch.aten.quantize_per_tensor %30, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%286 = torch.aten.int_repr %285 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%287 = torch.aten._make_per_tensor_quantized_tensor %286, %float1.953130e-03, %int0 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%288 = torch.aten.dequantize.self %287 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%289 = torch.aten.quantize_per_tensor %31, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%290 = torch.aten.int_repr %289 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%291 = torch.aten._make_per_tensor_quantized_tensor %290, %float7.812500e-03, %int0 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%292 = torch.aten.dequantize.self %291 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%293 = torch.aten.convolution %284, %288, %292, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,128,64,64],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,64,64],f32>
%294 = torch.aten.relu %293 : !torch.vtensor<[32,128,64,64],f32> -> !torch.vtensor<[32,128,64,64],f32>
%295 = torch.aten.quantize_per_tensor %294, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,128,64,64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%296 = torch.aten.int_repr %295 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],si8>
%297 = torch.aten._make_per_tensor_quantized_tensor %296, %float1.562500e-02, %int0 : !torch.vtensor<[32,128,64,64],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,64,64],!torch.qint8>
%298 = torch.aten.dequantize.self %297 : !torch.vtensor<[32,128,64,64],!torch.qint8> -> !torch.vtensor<[32,128,64,64],f32>
%299 = torch.aten.quantize_per_tensor %32, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[128,64,2,2],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,64,2,2],!torch.qint8>
%300 = torch.aten.int_repr %299 : !torch.vtensor<[128,64,2,2],!torch.qint8> -> !torch.vtensor<[128,64,2,2],si8>
%301 = torch.aten._make_per_tensor_quantized_tensor %300, %float1.953130e-03, %int0 : !torch.vtensor<[128,64,2,2],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,64,2,2],!torch.qint8>
%302 = torch.aten.dequantize.self %301 : !torch.vtensor<[128,64,2,2],!torch.qint8> -> !torch.vtensor<[128,64,2,2],f32>
%303 = torch.aten.quantize_per_tensor %33, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%304 = torch.aten.int_repr %303 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%305 = torch.aten._make_per_tensor_quantized_tensor %304, %float1.953130e-03, %int0 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%306 = torch.aten.dequantize.self %305 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%307 = torch.aten.convolution %298, %302, %306, %80, %59, %58, %true, %59, %int1 : !torch.vtensor<[32,128,64,64],f32>, !torch.vtensor<[128,64,2,2],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,128,128],f32>
%308 = torch.prim.ListConstruct %307, %113 : (!torch.vtensor<[32,64,128,128],f32>, !torch.vtensor<[32,64,128,128],f32>) -> !torch.list<vtensor>
%309 = torch.aten.cat %308, %int1 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,128,128,128],f32>
%310 = torch.aten.quantize_per_tensor %309, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,128,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,128,128],!torch.qint8>
%311 = torch.aten.int_repr %310 : !torch.vtensor<[32,128,128,128],!torch.qint8> -> !torch.vtensor<[32,128,128,128],si8>
%312 = torch.aten._make_per_tensor_quantized_tensor %311, %float1.562500e-02, %int0 : !torch.vtensor<[32,128,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,128,128],!torch.qint8>
%313 = torch.aten.dequantize.self %312 : !torch.vtensor<[32,128,128,128],!torch.qint8> -> !torch.vtensor<[32,128,128,128],f32>
%314 = torch.aten.quantize_per_tensor %34, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[64,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%315 = torch.aten.int_repr %314 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],si8>
%316 = torch.aten._make_per_tensor_quantized_tensor %315, %float1.953130e-03, %int0 : !torch.vtensor<[64,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,128,3,3],!torch.qint8>
%317 = torch.aten.dequantize.self %316 : !torch.vtensor<[64,128,3,3],!torch.qint8> -> !torch.vtensor<[64,128,3,3],f32>
%318 = torch.aten.quantize_per_tensor %35, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%319 = torch.aten.int_repr %318 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%320 = torch.aten._make_per_tensor_quantized_tensor %319, %float7.812500e-03, %int0 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%321 = torch.aten.dequantize.self %320 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%322 = torch.aten.convolution %313, %317, %321, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,128,128,128],f32>, !torch.vtensor<[64,128,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,128,128],f32>
%323 = torch.aten.relu %322 : !torch.vtensor<[32,64,128,128],f32> -> !torch.vtensor<[32,64,128,128],f32>
%324 = torch.aten.quantize_per_tensor %323, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%325 = torch.aten.int_repr %324 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],si8>
%326 = torch.aten._make_per_tensor_quantized_tensor %325, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%327 = torch.aten.dequantize.self %326 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],f32>
%328 = torch.aten.quantize_per_tensor %36, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%329 = torch.aten.int_repr %328 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%330 = torch.aten._make_per_tensor_quantized_tensor %329, %float1.953130e-03, %int0 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%331 = torch.aten.dequantize.self %330 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%332 = torch.aten.quantize_per_tensor %37, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%333 = torch.aten.int_repr %332 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%334 = torch.aten._make_per_tensor_quantized_tensor %333, %float7.812500e-03, %int0 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%335 = torch.aten.dequantize.self %334 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%336 = torch.aten.convolution %327, %331, %335, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,64,128,128],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,128,128],f32>
%337 = torch.aten.relu %336 : !torch.vtensor<[32,64,128,128],f32> -> !torch.vtensor<[32,64,128,128],f32>
%338 = torch.aten.quantize_per_tensor %337, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,128,128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%339 = torch.aten.int_repr %338 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],si8>
%340 = torch.aten._make_per_tensor_quantized_tensor %339, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,128,128],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,128,128],!torch.qint8>
%341 = torch.aten.dequantize.self %340 : !torch.vtensor<[32,64,128,128],!torch.qint8> -> !torch.vtensor<[32,64,128,128],f32>
%342 = torch.aten.quantize_per_tensor %38, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[64,32,2,2],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,32,2,2],!torch.qint8>
%343 = torch.aten.int_repr %342 : !torch.vtensor<[64,32,2,2],!torch.qint8> -> !torch.vtensor<[64,32,2,2],si8>
%344 = torch.aten._make_per_tensor_quantized_tensor %343, %float3.906250e-03, %int0 : !torch.vtensor<[64,32,2,2],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,32,2,2],!torch.qint8>
%345 = torch.aten.dequantize.self %344 : !torch.vtensor<[64,32,2,2],!torch.qint8> -> !torch.vtensor<[64,32,2,2],f32>
%346 = torch.aten.quantize_per_tensor %39, %float1.953130e-03, %int0, %int12 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%347 = torch.aten.int_repr %346 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%348 = torch.aten._make_per_tensor_quantized_tensor %347, %float1.953130e-03, %int0 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%349 = torch.aten.dequantize.self %348 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%350 = torch.aten.convolution %341, %345, %349, %80, %59, %58, %true, %59, %int1 : !torch.vtensor<[32,64,128,128],f32>, !torch.vtensor<[64,32,2,2],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,256,256],f32>
%351 = torch.prim.ListConstruct %350, %79 : (!torch.vtensor<[32,32,256,256],f32>, !torch.vtensor<[32,32,256,256],f32>) -> !torch.list<vtensor>
%352 = torch.aten.cat %351, %int1 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,64,256,256],f32>
%353 = torch.aten.quantize_per_tensor %352, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32,64,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,256,256],!torch.qint8>
%354 = torch.aten.int_repr %353 : !torch.vtensor<[32,64,256,256],!torch.qint8> -> !torch.vtensor<[32,64,256,256],si8>
%355 = torch.aten._make_per_tensor_quantized_tensor %354, %float1.562500e-02, %int0 : !torch.vtensor<[32,64,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,256,256],!torch.qint8>
%356 = torch.aten.dequantize.self %355 : !torch.vtensor<[32,64,256,256],!torch.qint8> -> !torch.vtensor<[32,64,256,256],f32>
%357 = torch.aten.quantize_per_tensor %40, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%358 = torch.aten.int_repr %357 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8>
%359 = torch.aten._make_per_tensor_quantized_tensor %358, %float3.906250e-03, %int0 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8>
%360 = torch.aten.dequantize.self %359 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32>
%361 = torch.aten.quantize_per_tensor %41, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%362 = torch.aten.int_repr %361 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%363 = torch.aten._make_per_tensor_quantized_tensor %362, %float1.562500e-02, %int0 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%364 = torch.aten.dequantize.self %363 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%365 = torch.aten.convolution %356, %360, %364, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,64,256,256],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,256,256],f32>
%366 = torch.aten.relu %365 : !torch.vtensor<[32,32,256,256],f32> -> !torch.vtensor<[32,32,256,256],f32>
%367 = torch.aten.quantize_per_tensor %366, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,32,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%368 = torch.aten.int_repr %367 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],si8>
%369 = torch.aten._make_per_tensor_quantized_tensor %368, %float3.125000e-02, %int0 : !torch.vtensor<[32,32,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%370 = torch.aten.dequantize.self %369 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],f32>
%371 = torch.aten.quantize_per_tensor %42, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%372 = torch.aten.int_repr %371 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%373 = torch.aten._make_per_tensor_quantized_tensor %372, %float7.812500e-03, %int0 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%374 = torch.aten.dequantize.self %373 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%375 = torch.aten.quantize_per_tensor %43, %float1.562500e-02, %int0, %int12 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%376 = torch.aten.int_repr %375 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%377 = torch.aten._make_per_tensor_quantized_tensor %376, %float1.562500e-02, %int0 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%378 = torch.aten.dequantize.self %377 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%379 = torch.aten.convolution %370, %374, %378, %58, %58, %58, %false, %59, %int1 : !torch.vtensor<[32,32,256,256],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,256,256],f32>
%380 = torch.aten.relu %379 : !torch.vtensor<[32,32,256,256],f32> -> !torch.vtensor<[32,32,256,256],f32>
%381 = torch.aten.quantize_per_tensor %380, %float3.125000e-02, %int0, %int12 : !torch.vtensor<[32,32,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%382 = torch.aten.int_repr %381 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],si8>
%383 = torch.aten._make_per_tensor_quantized_tensor %382, %float3.125000e-02, %int0 : !torch.vtensor<[32,32,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,256,256],!torch.qint8>
%384 = torch.aten.dequantize.self %383 : !torch.vtensor<[32,32,256,256],!torch.qint8> -> !torch.vtensor<[32,32,256,256],f32>
%385 = torch.aten.quantize_per_tensor %44, %float7.812500e-03, %int0, %int12 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%386 = torch.aten.int_repr %385 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],si8>
%387 = torch.aten._make_per_tensor_quantized_tensor %386, %float7.812500e-03, %int0 : !torch.vtensor<[1,32,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%388 = torch.aten.dequantize.self %387 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],f32>
%389 = torch.aten.quantize_per_tensor %45, %float3.906250e-03, %int0, %int12 : !torch.vtensor<[1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1],!torch.qint8>
%390 = torch.aten.int_repr %389 : !torch.vtensor<[1],!torch.qint8> -> !torch.vtensor<[1],si8>
%391 = torch.aten._make_per_tensor_quantized_tensor %390, %float3.906250e-03, %int0 : !torch.vtensor<[1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1],!torch.qint8>
%392 = torch.aten.dequantize.self %391 : !torch.vtensor<[1],!torch.qint8> -> !torch.vtensor<[1],f32>
%393 = torch.aten.convolution %384, %388, %392, %58, %59, %58, %false, %59, %int1 : !torch.vtensor<[32,32,256,256],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.vtensor<[1],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,1,256,256],f32>
%394 = torch.aten.quantize_per_tensor %393, %float1.250000e-01, %int0, %int12 : !torch.vtensor<[32,1,256,256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1,256,256],!torch.qint8>
%395 = torch.aten.int_repr %394 : !torch.vtensor<[32,1,256,256],!torch.qint8> -> !torch.vtensor<[32,1,256,256],si8>
%396 = torch.aten._make_per_tensor_quantized_tensor %395, %float1.250000e-01, %int0 : !torch.vtensor<[32,1,256,256],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1,256,256],!torch.qint8>
%397 = torch.aten.dequantize.self %396 : !torch.vtensor<[32,1,256,256],!torch.qint8> -> !torch.vtensor<[32,1,256,256],f32>
%398 = torch.aten.sigmoid %397 : !torch.vtensor<[32,1,256,256],f32> -> !torch.vtensor<[32,1,256,256],f32>
return %398 : !torch.vtensor<[32,1,256,256],f32>
}
}
@AmosLewis
Copy link
Author

%221 = torch.aten.convolution %212, %216, %220, %80, %59, %58, %true, %59, %int1 : !torch.vtensor<[32,512,16,16],f32>, !torch.vtensor<[512,256,2,2],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,32,32],f32>

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment