Skip to content

Instantly share code, notes, and snippets.

@aviator19941
Created June 12, 2024 21:06
Show Gist options
  • Save aviator19941/f76d3e86754517578807a710ed9d1195 to your computer and use it in GitHub Desktop.
before_convert_conv_channels_last_pass.mlir
This file has been truncated, but you can view the full file.
// -----// IR Dump Before ConvertConvToChannelsLastPass (iree-preprocessing-convert-conv-to-channels-last) //----- //
// Synchronous ABI wrapper around @main$async: imports no tensors itself, it
// only forwards the six !hal.buffer_view arguments, passes a null wait fence
// (no wait dependency for the async call), and blocks on a freshly created
// signal fence before returning the async result.
util.func public @main(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view, %arg3: !hal.buffer_view, %arg4: !hal.buffer_view, %arg5: !hal.buffer_view) -> !hal.buffer_view attributes {iree.abi.stub} {
// -1 timeout means wait indefinitely in hal.fence.await below.
%c-1_i32 = arith.constant -1 : i32
%c0 = arith.constant 0 : index
// Device 0 is the device on which the signal fence is created.
%device_0 = hal.devices.get %c0 : !hal.device
// Null fence passed as the wait fence: inputs are treated as already available.
%0 = util.null : !hal.fence
// Signal fence that @main$async will signal when its work completes.
%fence = hal.fence.create device(%device_0 : !hal.device) flags("None") : !hal.fence
%1 = util.call @main$async(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %0, %fence) : (!hal.buffer_view, !hal.buffer_view, !hal.buffer_view, !hal.buffer_view, !hal.buffer_view, !hal.buffer_view, !hal.fence, !hal.fence) -> !hal.buffer_view
// Block until the fence is signaled (infinite timeout, see %c-1_i32).
// NOTE(review): the i32 status result is unused here, so a wait failure is
// not propagated to the caller — confirm this is the intended ABI behavior.
%status = hal.fence.await until([%fence]) timeout_millis(%c-1_i32) : i32
util.return %1 : !hal.buffer_view
}
// -----// IR Dump Before ConvertConvToChannelsLastPass (iree-preprocessing-convert-conv-to-channels-last) //----- //
util.func public @main$async(%arg0: !hal.buffer_view, %arg1: !hal.buffer_view, %arg2: !hal.buffer_view, %arg3: !hal.buffer_view, %arg4: !hal.buffer_view, %arg5: !hal.buffer_view, %arg6: !hal.fence, %arg7: !hal.fence) -> !hal.buffer_view attributes {inlining_policy = #util.inline.never, iree.abi.model = "coarse-fences", iree.abi.stub} {
%cst = arith.constant 0.000000e+00 : f32
%c0_i64 = arith.constant 0 : i64
%cst_0 = arith.constant 1.000000e+00 : f16
%cst_1 = arith.constant 0.000000e+00 : f16
%cst_2 = arith.constant 0xFF800000 : f32
%cst_3 = arith.constant 2.000000e+00 : f16
%cst_4 = arith.constant 5.000000e-01 : f16
%cst_5 = arith.constant 0.353553385 : f32
%cst_6 = arith.constant 9.99999997E-7 : f32
%cst_7 = arith.constant 9.99999974E-6 : f32
%cst_8 = arith.constant -9.21033954 : f32
%cst_9 = arith.constant 1.590000e+02 : f32
%cst_10 = arith.constant 1.280000e+02 : f32
%cst_11 = arith.constant -1.280000e+02 : f16
%cst_12 = arith.constant 1.270000e+02 : f16
%cst_13 = arith.constant 1.638400e+05 : f32
%cst_14 = arith.constant 4.096000e+04 : f32
%cst_15 = arith.constant 8.192000e+04 : f32
%cst_16 = arith.constant 6.400000e+02 : f32
%cst_17 = arith.constant 2.048000e+04 : f32
%cst_18 = arith.constant 1.280000e+03 : f32
%cst_19 = arith.constant 6.144000e+04 : f32
%cst_20 = arith.constant 5.000000e-01 : f32
%c2_i64 = arith.constant 2 : i64
%c1280_i64 = arith.constant 1280 : i64
%c32_i64 = arith.constant 32 : i64
%cst_21 = arith.constant 2.457600e+05 : f32
%cst_22 = arith.constant 1.228800e+05 : f32
%c640_i64 = arith.constant 640 : i64
%c64_i64 = arith.constant 64 : i64
%cst_23 = arith.constant 4.915200e+05 : f32
%cst_24 = arith.constant 3.276800e+05 : f32
%0 = hal.tensor.import wait(%arg6) => %arg0 : !hal.buffer_view -> tensor<1x4x128x128xf16>
%1 = hal.tensor.import wait(%arg6) => %arg1 : !hal.buffer_view -> tensor<1xi32>
%2 = hal.tensor.import wait(%arg6) => %arg2 : !hal.buffer_view -> tensor<2x64x2048xf16>
%3 = hal.tensor.import wait(%arg6) => %arg3 : !hal.buffer_view -> tensor<2x1280xf16>
%4 = hal.tensor.import wait(%arg6) => %arg4 : !hal.buffer_view -> tensor<2x6xf16>
%5 = hal.tensor.import wait(%arg6) => %arg5 : !hal.buffer_view -> tensor<1xf16>
%__auto.time_embedding.linear_1.weight = util.global.load @__auto.time_embedding.linear_1.weight : tensor<1280x320xf16>
%__auto.time_embedding.linear_1.bias = util.global.load @__auto.time_embedding.linear_1.bias : tensor<1280xf16>
%__auto.time_embedding.linear_2.weight = util.global.load @__auto.time_embedding.linear_2.weight : tensor<1280x1280xf16>
%__auto.time_embedding.linear_2.bias = util.global.load @__auto.time_embedding.linear_2.bias : tensor<1280xf16>
%__auto.add_embedding.linear_1.weight = util.global.load @__auto.add_embedding.linear_1.weight : tensor<1280x2816xf16>
%__auto.add_embedding.linear_1.bias = util.global.load @__auto.add_embedding.linear_1.bias : tensor<1280xf16>
%__auto.add_embedding.linear_2.weight = util.global.load @__auto.add_embedding.linear_2.weight : tensor<1280x1280xf16>
%__auto.add_embedding.linear_2.bias = util.global.load @__auto.add_embedding.linear_2.bias : tensor<1280xf16>
%__auto.conv_in.premul_input = util.global.load @__auto.conv_in.premul_input : tensor<1x4x1x1xf16>
%__auto.conv_in.q_input3Ascale = util.global.load @"__auto.conv_in.q_input:scale" : tensor<f16>
%__auto.conv_in.q_input3Arscale = util.global.load @"__auto.conv_in.q_input:rscale" : tensor<f16>
%__auto.conv_in.weight3Ad = util.global.load @"__auto.conv_in.weight:d" : tensor<320x1x1x1xf16>
%__auto.conv_in.weight3Am = util.global.load @"__auto.conv_in.weight:m" : tensor<320x1x1x1xi8>
%__auto.conv_in.weight3Aqs = util.global.load @"__auto.conv_in.weight:qs" : tensor<320x4x3x3xi8>
%__auto.conv_in.bias = util.global.load @__auto.conv_in.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.norm1.weight = util.global.load @__auto.down_blocks.0.resnets.0.norm1.weight : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.norm1.bias = util.global.load @__auto.down_blocks.0.resnets.0.norm1.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.conv1.premul_input = util.global.load @__auto.down_blocks.0.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.0.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.0.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.0.resnets.0.conv1.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.weight:d" : tensor<320x1x1x1xf16>
%__auto.down_blocks.0.resnets.0.conv1.weight3Am = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.weight:m" : tensor<320x1x1x1xi8>
%__auto.down_blocks.0.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.weight:qs" : tensor<320x320x3x3xi8>
%__auto.down_blocks.0.resnets.0.conv1.bias = util.global.load @__auto.down_blocks.0.resnets.0.conv1.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.time_emb_proj.weight = util.global.load @__auto.down_blocks.0.resnets.0.time_emb_proj.weight : tensor<320x1280xf16>
%__auto.down_blocks.0.resnets.0.time_emb_proj.bias = util.global.load @__auto.down_blocks.0.resnets.0.time_emb_proj.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.norm2.weight = util.global.load @__auto.down_blocks.0.resnets.0.norm2.weight : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.norm2.bias = util.global.load @__auto.down_blocks.0.resnets.0.norm2.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.0.conv2.premul_input = util.global.load @__auto.down_blocks.0.resnets.0.conv2.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.0.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.0.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.0.resnets.0.conv2.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.weight:d" : tensor<320x1x1x1xf16>
%__auto.down_blocks.0.resnets.0.conv2.weight3Am = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.weight:m" : tensor<320x1x1x1xi8>
%__auto.down_blocks.0.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.weight:qs" : tensor<320x320x3x3xi8>
%__auto.down_blocks.0.resnets.0.conv2.bias = util.global.load @__auto.down_blocks.0.resnets.0.conv2.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.norm1.weight = util.global.load @__auto.down_blocks.0.resnets.1.norm1.weight : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.norm1.bias = util.global.load @__auto.down_blocks.0.resnets.1.norm1.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.conv1.premul_input = util.global.load @__auto.down_blocks.0.resnets.1.conv1.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.0.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.0.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.0.resnets.1.conv1.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.weight:d" : tensor<320x1x1x1xf16>
%__auto.down_blocks.0.resnets.1.conv1.weight3Am = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.weight:m" : tensor<320x1x1x1xi8>
%__auto.down_blocks.0.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.weight:qs" : tensor<320x320x3x3xi8>
%__auto.down_blocks.0.resnets.1.conv1.bias = util.global.load @__auto.down_blocks.0.resnets.1.conv1.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.time_emb_proj.weight = util.global.load @__auto.down_blocks.0.resnets.1.time_emb_proj.weight : tensor<320x1280xf16>
%__auto.down_blocks.0.resnets.1.time_emb_proj.bias = util.global.load @__auto.down_blocks.0.resnets.1.time_emb_proj.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.norm2.weight = util.global.load @__auto.down_blocks.0.resnets.1.norm2.weight : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.norm2.bias = util.global.load @__auto.down_blocks.0.resnets.1.norm2.bias : tensor<320xf16>
%__auto.down_blocks.0.resnets.1.conv2.premul_input = util.global.load @__auto.down_blocks.0.resnets.1.conv2.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.0.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.0.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.0.resnets.1.conv2.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.weight:d" : tensor<320x1x1x1xf16>
%__auto.down_blocks.0.resnets.1.conv2.weight3Am = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.weight:m" : tensor<320x1x1x1xi8>
%__auto.down_blocks.0.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.weight:qs" : tensor<320x320x3x3xi8>
%__auto.down_blocks.0.resnets.1.conv2.bias = util.global.load @__auto.down_blocks.0.resnets.1.conv2.bias : tensor<320xf16>
%__auto.down_blocks.0.downsamplers.0.conv.premul_input = util.global.load @__auto.down_blocks.0.downsamplers.0.conv.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.0.downsamplers.0.conv.q_input3Ascale = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.q_input:scale" : tensor<f16>
%__auto.down_blocks.0.downsamplers.0.conv.q_input3Arscale = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.q_input:rscale" : tensor<f16>
%__auto.down_blocks.0.downsamplers.0.conv.weight3Ad = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.weight:d" : tensor<320x1x1x1xf16>
%__auto.down_blocks.0.downsamplers.0.conv.weight3Am = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.weight:m" : tensor<320x1x1x1xi8>
%__auto.down_blocks.0.downsamplers.0.conv.weight3Aqs = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.weight:qs" : tensor<320x320x3x3xi8>
%__auto.down_blocks.0.downsamplers.0.conv.bias = util.global.load @__auto.down_blocks.0.downsamplers.0.conv.bias : tensor<320xf16>
%__auto.down_blocks.1.resnets.0.norm1.weight = util.global.load @__auto.down_blocks.1.resnets.0.norm1.weight : tensor<320xf16>
%__auto.down_blocks.1.resnets.0.norm1.bias = util.global.load @__auto.down_blocks.1.resnets.0.norm1.bias : tensor<320xf16>
%__auto.down_blocks.1.resnets.0.conv1.premul_input = util.global.load @__auto.down_blocks.1.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv1.weight3Ad = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv1.weight3Am = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.weight:qs" : tensor<640x320x3x3xi8>
%__auto.down_blocks.1.resnets.0.conv1.bias = util.global.load @__auto.down_blocks.1.resnets.0.conv1.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.0.time_emb_proj.weight = util.global.load @__auto.down_blocks.1.resnets.0.time_emb_proj.weight : tensor<640x1280xf16>
%__auto.down_blocks.1.resnets.0.time_emb_proj.bias = util.global.load @__auto.down_blocks.1.resnets.0.time_emb_proj.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.0.norm2.weight = util.global.load @__auto.down_blocks.1.resnets.0.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.resnets.0.norm2.bias = util.global.load @__auto.down_blocks.1.resnets.0.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.0.conv2.premul_input = util.global.load @__auto.down_blocks.1.resnets.0.conv2.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.0.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.0.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv2.weight3Ad = util.global.load @"__auto.down_blocks.1.resnets.0.conv2.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv2.weight3Am = util.global.load @"__auto.down_blocks.1.resnets.0.conv2.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.0.conv2.weight:qs" : tensor<640x640x3x3xi8>
%__auto.down_blocks.1.resnets.0.conv2.bias = util.global.load @__auto.down_blocks.1.resnets.0.conv2.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.0.conv_shortcut.premul_input = util.global.load @__auto.down_blocks.1.resnets.0.conv_shortcut.premul_input : tensor<1x320x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv_shortcut.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.0.conv_shortcut.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv_shortcut.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.0.conv_shortcut.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.resnets.0.conv_shortcut.weight3Ad = util.global.load @"__auto.down_blocks.1.resnets.0.conv_shortcut.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.resnets.0.conv_shortcut.weight3Am = util.global.load @"__auto.down_blocks.1.resnets.0.conv_shortcut.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.resnets.0.conv_shortcut.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.0.conv_shortcut.weight:qs" : tensor<640x320x1x1xi8>
%__auto.down_blocks.1.resnets.0.conv_shortcut.bias = util.global.load @__auto.down_blocks.1.resnets.0.conv_shortcut.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.norm.weight = util.global.load @__auto.down_blocks.1.attentions.0.norm.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.norm.bias = util.global.load @__auto.down_blocks.1.attentions.0.norm.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.proj_in.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.proj_in.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.proj_in.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.proj_in.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.proj_in.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.proj_in.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.proj_in.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.proj_in.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.proj_in.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.proj_in.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.proj_in.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.proj_in.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.proj_in.bias = util.global.load @__auto.down_blocks.1.attentions.0.proj_in.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<5120x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<5120x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<5120x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x2560xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" : tensor<640x2560xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<5120x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<5120x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<5120x640xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x2560xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" : tensor<640x2560xi8>
%__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.0.proj_out.premul_input = util.global.load @__auto.down_blocks.1.attentions.0.proj_out.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.0.proj_out.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.0.proj_out.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.proj_out.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.0.proj_out.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.0.proj_out.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.0.proj_out.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.0.proj_out.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.0.proj_out.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.0.proj_out.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.0.proj_out.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.0.proj_out.bias = util.global.load @__auto.down_blocks.1.attentions.0.proj_out.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.norm1.weight = util.global.load @__auto.down_blocks.1.resnets.1.norm1.weight : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.norm1.bias = util.global.load @__auto.down_blocks.1.resnets.1.norm1.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.conv1.premul_input = util.global.load @__auto.down_blocks.1.resnets.1.conv1.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.1.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.1.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.1.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.resnets.1.conv1.weight3Ad = util.global.load @"__auto.down_blocks.1.resnets.1.conv1.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.resnets.1.conv1.weight3Am = util.global.load @"__auto.down_blocks.1.resnets.1.conv1.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.1.conv1.weight:qs" : tensor<640x640x3x3xi8>
%__auto.down_blocks.1.resnets.1.conv1.bias = util.global.load @__auto.down_blocks.1.resnets.1.conv1.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.time_emb_proj.weight = util.global.load @__auto.down_blocks.1.resnets.1.time_emb_proj.weight : tensor<640x1280xf16>
%__auto.down_blocks.1.resnets.1.time_emb_proj.bias = util.global.load @__auto.down_blocks.1.resnets.1.time_emb_proj.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.norm2.weight = util.global.load @__auto.down_blocks.1.resnets.1.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.norm2.bias = util.global.load @__auto.down_blocks.1.resnets.1.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.resnets.1.conv2.premul_input = util.global.load @__auto.down_blocks.1.resnets.1.conv2.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.1.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.1.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.1.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.resnets.1.conv2.weight3Ad = util.global.load @"__auto.down_blocks.1.resnets.1.conv2.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.resnets.1.conv2.weight3Am = util.global.load @"__auto.down_blocks.1.resnets.1.conv2.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.1.conv2.weight:qs" : tensor<640x640x3x3xi8>
%__auto.down_blocks.1.resnets.1.conv2.bias = util.global.load @__auto.down_blocks.1.resnets.1.conv2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.norm.weight = util.global.load @__auto.down_blocks.1.attentions.1.norm.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.norm.bias = util.global.load @__auto.down_blocks.1.attentions.1.norm.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.proj_in.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.proj_in.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.proj_in.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.proj_in.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.proj_in.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.proj_in.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.proj_in.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.proj_in.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.proj_in.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.proj_in.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.proj_in.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.proj_in.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.proj_in.bias = util.global.load @__auto.down_blocks.1.attentions.1.proj_in.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<5120x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<5120x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<5120x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x2560xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" : tensor<640x2560xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<640x2048xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<5120x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<5120x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<5120x640xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x2560xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" : tensor<640x2560xi8>
%__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias : tensor<640xf16>
%__auto.down_blocks.1.attentions.1.proj_out.premul_input = util.global.load @__auto.down_blocks.1.attentions.1.proj_out.premul_input : tensor<1x1x640xf16>
%__auto.down_blocks.1.attentions.1.proj_out.q_input3Ascale = util.global.load @"__auto.down_blocks.1.attentions.1.proj_out.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.proj_out.q_input3Arscale = util.global.load @"__auto.down_blocks.1.attentions.1.proj_out.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.attentions.1.proj_out.weight3Ad = util.global.load @"__auto.down_blocks.1.attentions.1.proj_out.weight:d" : tensor<640x1xf16>
%__auto.down_blocks.1.attentions.1.proj_out.weight3Am = util.global.load @"__auto.down_blocks.1.attentions.1.proj_out.weight:m" : tensor<640x1xi8>
%__auto.down_blocks.1.attentions.1.proj_out.weight3Aqs = util.global.load @"__auto.down_blocks.1.attentions.1.proj_out.weight:qs" : tensor<640x640xi8>
%__auto.down_blocks.1.attentions.1.proj_out.bias = util.global.load @__auto.down_blocks.1.attentions.1.proj_out.bias : tensor<640xf16>
%__auto.down_blocks.1.downsamplers.0.conv.premul_input = util.global.load @__auto.down_blocks.1.downsamplers.0.conv.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.1.downsamplers.0.conv.q_input3Ascale = util.global.load @"__auto.down_blocks.1.downsamplers.0.conv.q_input:scale" : tensor<f16>
%__auto.down_blocks.1.downsamplers.0.conv.q_input3Arscale = util.global.load @"__auto.down_blocks.1.downsamplers.0.conv.q_input:rscale" : tensor<f16>
%__auto.down_blocks.1.downsamplers.0.conv.weight3Ad = util.global.load @"__auto.down_blocks.1.downsamplers.0.conv.weight:d" : tensor<640x1x1x1xf16>
%__auto.down_blocks.1.downsamplers.0.conv.weight3Am = util.global.load @"__auto.down_blocks.1.downsamplers.0.conv.weight:m" : tensor<640x1x1x1xi8>
%__auto.down_blocks.1.downsamplers.0.conv.weight3Aqs = util.global.load @"__auto.down_blocks.1.downsamplers.0.conv.weight:qs" : tensor<640x640x3x3xi8>
%__auto.down_blocks.1.downsamplers.0.conv.bias = util.global.load @__auto.down_blocks.1.downsamplers.0.conv.bias : tensor<640xf16>
%__auto.down_blocks.2.resnets.0.norm1.weight = util.global.load @__auto.down_blocks.2.resnets.0.norm1.weight : tensor<640xf16>
%__auto.down_blocks.2.resnets.0.norm1.bias = util.global.load @__auto.down_blocks.2.resnets.0.norm1.bias : tensor<640xf16>
%__auto.down_blocks.2.resnets.0.conv1.premul_input = util.global.load @__auto.down_blocks.2.resnets.0.conv1.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.2.resnets.0.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.2.resnets.0.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv1.weight3Ad = util.global.load @"__auto.down_blocks.2.resnets.0.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv1.weight3Am = util.global.load @"__auto.down_blocks.2.resnets.0.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.down_blocks.2.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.2.resnets.0.conv1.weight:qs" : tensor<1280x640x3x3xi8>
%__auto.down_blocks.2.resnets.0.conv1.bias = util.global.load @__auto.down_blocks.2.resnets.0.conv1.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.0.time_emb_proj.weight = util.global.load @__auto.down_blocks.2.resnets.0.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.down_blocks.2.resnets.0.time_emb_proj.bias = util.global.load @__auto.down_blocks.2.resnets.0.time_emb_proj.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.0.norm2.weight = util.global.load @__auto.down_blocks.2.resnets.0.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.resnets.0.norm2.bias = util.global.load @__auto.down_blocks.2.resnets.0.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.0.conv2.premul_input = util.global.load @__auto.down_blocks.2.resnets.0.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.resnets.0.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.resnets.0.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv2.weight3Ad = util.global.load @"__auto.down_blocks.2.resnets.0.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv2.weight3Am = util.global.load @"__auto.down_blocks.2.resnets.0.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.down_blocks.2.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.2.resnets.0.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.down_blocks.2.resnets.0.conv2.bias = util.global.load @__auto.down_blocks.2.resnets.0.conv2.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.0.conv_shortcut.premul_input = util.global.load @__auto.down_blocks.2.resnets.0.conv_shortcut.premul_input : tensor<1x640x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv_shortcut.q_input3Ascale = util.global.load @"__auto.down_blocks.2.resnets.0.conv_shortcut.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv_shortcut.q_input3Arscale = util.global.load @"__auto.down_blocks.2.resnets.0.conv_shortcut.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.resnets.0.conv_shortcut.weight3Ad = util.global.load @"__auto.down_blocks.2.resnets.0.conv_shortcut.weight:d" : tensor<1280x1x1x1xf16>
%__auto.down_blocks.2.resnets.0.conv_shortcut.weight3Am = util.global.load @"__auto.down_blocks.2.resnets.0.conv_shortcut.weight:m" : tensor<1280x1x1x1xi8>
%__auto.down_blocks.2.resnets.0.conv_shortcut.weight3Aqs = util.global.load @"__auto.down_blocks.2.resnets.0.conv_shortcut.weight:qs" : tensor<1280x640x1x1xi8>
%__auto.down_blocks.2.resnets.0.conv_shortcut.bias = util.global.load @__auto.down_blocks.2.resnets.0.conv_shortcut.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.norm.weight = util.global.load @__auto.down_blocks.2.attentions.0.norm.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.norm.bias = util.global.load @__auto.down_blocks.2.attentions.0.norm.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.proj_in.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.proj_in.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.proj_in.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.proj_in.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.proj_in.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.proj_in.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.proj_in.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.proj_in.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.proj_in.bias = util.global.load @__auto.down_blocks.2.attentions.0.proj_in.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.0.proj_out.premul_input = util.global.load @__auto.down_blocks.2.attentions.0.proj_out.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.0.proj_out.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.0.proj_out.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.proj_out.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.0.proj_out.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.0.proj_out.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.0.proj_out.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.0.proj_out.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.0.proj_out.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.0.proj_out.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.0.proj_out.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.0.proj_out.bias = util.global.load @__auto.down_blocks.2.attentions.0.proj_out.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.norm1.weight = util.global.load @__auto.down_blocks.2.resnets.1.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.norm1.bias = util.global.load @__auto.down_blocks.2.resnets.1.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.conv1.premul_input = util.global.load @__auto.down_blocks.2.resnets.1.conv1.premul_input : tensor<1x1280x1x1xf16>
%__auto.down_blocks.2.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.2.resnets.1.conv1.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.2.resnets.1.conv1.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.resnets.1.conv1.weight3Ad = util.global.load @"__auto.down_blocks.2.resnets.1.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.down_blocks.2.resnets.1.conv1.weight3Am = util.global.load @"__auto.down_blocks.2.resnets.1.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.down_blocks.2.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.2.resnets.1.conv1.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.down_blocks.2.resnets.1.conv1.bias = util.global.load @__auto.down_blocks.2.resnets.1.conv1.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.time_emb_proj.weight = util.global.load @__auto.down_blocks.2.resnets.1.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.down_blocks.2.resnets.1.time_emb_proj.bias = util.global.load @__auto.down_blocks.2.resnets.1.time_emb_proj.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.norm2.weight = util.global.load @__auto.down_blocks.2.resnets.1.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.norm2.bias = util.global.load @__auto.down_blocks.2.resnets.1.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.resnets.1.conv2.premul_input = util.global.load @__auto.down_blocks.2.resnets.1.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.down_blocks.2.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.resnets.1.conv2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.resnets.1.conv2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.resnets.1.conv2.weight3Ad = util.global.load @"__auto.down_blocks.2.resnets.1.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.down_blocks.2.resnets.1.conv2.weight3Am = util.global.load @"__auto.down_blocks.2.resnets.1.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.down_blocks.2.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.2.resnets.1.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.down_blocks.2.resnets.1.conv2.bias = util.global.load @__auto.down_blocks.2.resnets.1.conv2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.norm.weight = util.global.load @__auto.down_blocks.2.attentions.1.norm.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.norm.bias = util.global.load @__auto.down_blocks.2.attentions.1.norm.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.proj_in.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.proj_in.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.proj_in.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.proj_in.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.proj_in.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.proj_in.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.proj_in.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.proj_in.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.proj_in.bias = util.global.load @__auto.down_blocks.2.attentions.1.proj_in.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias = util.global.load @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias : tensor<1280xf16>
%__auto.down_blocks.2.attentions.1.proj_out.premul_input = util.global.load @__auto.down_blocks.2.attentions.1.proj_out.premul_input : tensor<1x1x1280xf16>
%__auto.down_blocks.2.attentions.1.proj_out.q_input3Ascale = util.global.load @"__auto.down_blocks.2.attentions.1.proj_out.q_input:scale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.proj_out.q_input3Arscale = util.global.load @"__auto.down_blocks.2.attentions.1.proj_out.q_input:rscale" : tensor<f16>
%__auto.down_blocks.2.attentions.1.proj_out.weight3Ad = util.global.load @"__auto.down_blocks.2.attentions.1.proj_out.weight:d" : tensor<1280x1xf16>
%__auto.down_blocks.2.attentions.1.proj_out.weight3Am = util.global.load @"__auto.down_blocks.2.attentions.1.proj_out.weight:m" : tensor<1280x1xi8>
%__auto.down_blocks.2.attentions.1.proj_out.weight3Aqs = util.global.load @"__auto.down_blocks.2.attentions.1.proj_out.weight:qs" : tensor<1280x1280xi8>
%__auto.down_blocks.2.attentions.1.proj_out.bias = util.global.load @__auto.down_blocks.2.attentions.1.proj_out.bias : tensor<1280xf16>
%__auto.mid_block.resnets.0.norm1.weight = util.global.load @__auto.mid_block.resnets.0.norm1.weight : tensor<1280xf16>
%__auto.mid_block.resnets.0.norm1.bias = util.global.load @__auto.mid_block.resnets.0.norm1.bias : tensor<1280xf16>
%__auto.mid_block.resnets.0.conv1.premul_input = util.global.load @__auto.mid_block.resnets.0.conv1.premul_input : tensor<1x1280x1x1xf16>
%__auto.mid_block.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.mid_block.resnets.0.conv1.q_input:scale" : tensor<f16>
%__auto.mid_block.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.mid_block.resnets.0.conv1.q_input:rscale" : tensor<f16>
%__auto.mid_block.resnets.0.conv1.weight3Ad = util.global.load @"__auto.mid_block.resnets.0.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.mid_block.resnets.0.conv1.weight3Am = util.global.load @"__auto.mid_block.resnets.0.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.mid_block.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.mid_block.resnets.0.conv1.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.mid_block.resnets.0.conv1.bias = util.global.load @__auto.mid_block.resnets.0.conv1.bias : tensor<1280xf16>
%__auto.mid_block.resnets.0.time_emb_proj.weight = util.global.load @__auto.mid_block.resnets.0.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.mid_block.resnets.0.time_emb_proj.bias = util.global.load @__auto.mid_block.resnets.0.time_emb_proj.bias : tensor<1280xf16>
%__auto.mid_block.resnets.0.norm2.weight = util.global.load @__auto.mid_block.resnets.0.norm2.weight : tensor<1280xf16>
%__auto.mid_block.resnets.0.norm2.bias = util.global.load @__auto.mid_block.resnets.0.norm2.bias : tensor<1280xf16>
%__auto.mid_block.resnets.0.conv2.premul_input = util.global.load @__auto.mid_block.resnets.0.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.mid_block.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.mid_block.resnets.0.conv2.q_input:scale" : tensor<f16>
%__auto.mid_block.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.mid_block.resnets.0.conv2.q_input:rscale" : tensor<f16>
%__auto.mid_block.resnets.0.conv2.weight3Ad = util.global.load @"__auto.mid_block.resnets.0.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.mid_block.resnets.0.conv2.weight3Am = util.global.load @"__auto.mid_block.resnets.0.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.mid_block.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.mid_block.resnets.0.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.mid_block.resnets.0.conv2.bias = util.global.load @__auto.mid_block.resnets.0.conv2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.norm.weight = util.global.load @__auto.mid_block.attentions.0.norm.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.norm.bias = util.global.load @__auto.mid_block.attentions.0.norm.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.proj_in.premul_input = util.global.load @__auto.mid_block.attentions.0.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.proj_in.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.proj_in.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.proj_in.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.proj_in.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.proj_in.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.proj_in.weight3Am = util.global.load @"__auto.mid_block.attentions.0.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.proj_in.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.proj_in.bias = util.global.load @__auto.mid_block.attentions.0.proj_in.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm1.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm1.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm1.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm1.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm2.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm2.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm3.weight = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm3.weight : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.norm3.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.norm3.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.premul_input = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight3Am = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias = util.global.load @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias : tensor<1280xf16>
%__auto.mid_block.attentions.0.proj_out.premul_input = util.global.load @__auto.mid_block.attentions.0.proj_out.premul_input : tensor<1x1x1280xf16>
%__auto.mid_block.attentions.0.proj_out.q_input3Ascale = util.global.load @"__auto.mid_block.attentions.0.proj_out.q_input:scale" : tensor<f16>
%__auto.mid_block.attentions.0.proj_out.q_input3Arscale = util.global.load @"__auto.mid_block.attentions.0.proj_out.q_input:rscale" : tensor<f16>
%__auto.mid_block.attentions.0.proj_out.weight3Ad = util.global.load @"__auto.mid_block.attentions.0.proj_out.weight:d" : tensor<1280x1xf16>
%__auto.mid_block.attentions.0.proj_out.weight3Am = util.global.load @"__auto.mid_block.attentions.0.proj_out.weight:m" : tensor<1280x1xi8>
%__auto.mid_block.attentions.0.proj_out.weight3Aqs = util.global.load @"__auto.mid_block.attentions.0.proj_out.weight:qs" : tensor<1280x1280xi8>
%__auto.mid_block.attentions.0.proj_out.bias = util.global.load @__auto.mid_block.attentions.0.proj_out.bias : tensor<1280xf16>
%__auto.mid_block.resnets.1.norm1.weight = util.global.load @__auto.mid_block.resnets.1.norm1.weight : tensor<1280xf16>
%__auto.mid_block.resnets.1.norm1.bias = util.global.load @__auto.mid_block.resnets.1.norm1.bias : tensor<1280xf16>
%__auto.mid_block.resnets.1.conv1.premul_input = util.global.load @__auto.mid_block.resnets.1.conv1.premul_input : tensor<1x1280x1x1xf16>
%__auto.mid_block.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.mid_block.resnets.1.conv1.q_input:scale" : tensor<f16>
%__auto.mid_block.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.mid_block.resnets.1.conv1.q_input:rscale" : tensor<f16>
%__auto.mid_block.resnets.1.conv1.weight3Ad = util.global.load @"__auto.mid_block.resnets.1.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.mid_block.resnets.1.conv1.weight3Am = util.global.load @"__auto.mid_block.resnets.1.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.mid_block.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.mid_block.resnets.1.conv1.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.mid_block.resnets.1.conv1.bias = util.global.load @__auto.mid_block.resnets.1.conv1.bias : tensor<1280xf16>
%__auto.mid_block.resnets.1.time_emb_proj.weight = util.global.load @__auto.mid_block.resnets.1.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.mid_block.resnets.1.time_emb_proj.bias = util.global.load @__auto.mid_block.resnets.1.time_emb_proj.bias : tensor<1280xf16>
%__auto.mid_block.resnets.1.norm2.weight = util.global.load @__auto.mid_block.resnets.1.norm2.weight : tensor<1280xf16>
%__auto.mid_block.resnets.1.norm2.bias = util.global.load @__auto.mid_block.resnets.1.norm2.bias : tensor<1280xf16>
%__auto.mid_block.resnets.1.conv2.premul_input = util.global.load @__auto.mid_block.resnets.1.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.mid_block.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.mid_block.resnets.1.conv2.q_input:scale" : tensor<f16>
%__auto.mid_block.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.mid_block.resnets.1.conv2.q_input:rscale" : tensor<f16>
%__auto.mid_block.resnets.1.conv2.weight3Ad = util.global.load @"__auto.mid_block.resnets.1.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.mid_block.resnets.1.conv2.weight3Am = util.global.load @"__auto.mid_block.resnets.1.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.mid_block.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.mid_block.resnets.1.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.mid_block.resnets.1.conv2.bias = util.global.load @__auto.mid_block.resnets.1.conv2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.norm1.weight = util.global.load @__auto.up_blocks.0.resnets.0.norm1.weight : tensor<2560xf16>
%__auto.up_blocks.0.resnets.0.norm1.bias = util.global.load @__auto.up_blocks.0.resnets.0.norm1.bias : tensor<2560xf16>
%__auto.up_blocks.0.resnets.0.conv1.premul_input = util.global.load @__auto.up_blocks.0.resnets.0.conv1.premul_input : tensor<1x2560x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.0.conv1.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.0.conv1.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv1.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.0.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv1.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.0.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.0.conv1.weight:qs" : tensor<1280x2560x3x3xi8>
%__auto.up_blocks.0.resnets.0.conv1.bias = util.global.load @__auto.up_blocks.0.resnets.0.conv1.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.time_emb_proj.weight = util.global.load @__auto.up_blocks.0.resnets.0.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.up_blocks.0.resnets.0.time_emb_proj.bias = util.global.load @__auto.up_blocks.0.resnets.0.time_emb_proj.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.norm2.weight = util.global.load @__auto.up_blocks.0.resnets.0.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.norm2.bias = util.global.load @__auto.up_blocks.0.resnets.0.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.conv2.premul_input = util.global.load @__auto.up_blocks.0.resnets.0.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.0.conv2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.0.conv2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv2.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.0.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv2.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.0.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.0.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.up_blocks.0.resnets.0.conv2.bias = util.global.load @__auto.up_blocks.0.resnets.0.conv2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.0.conv_shortcut.premul_input = util.global.load @__auto.up_blocks.0.resnets.0.conv_shortcut.premul_input : tensor<1x2560x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv_shortcut.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.0.conv_shortcut.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv_shortcut.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.0.conv_shortcut.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.0.conv_shortcut.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.0.conv_shortcut.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.0.conv_shortcut.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.0.conv_shortcut.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.0.conv_shortcut.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.0.conv_shortcut.weight:qs" : tensor<1280x2560x1x1xi8>
%__auto.up_blocks.0.resnets.0.conv_shortcut.bias = util.global.load @__auto.up_blocks.0.resnets.0.conv_shortcut.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.norm.weight = util.global.load @__auto.up_blocks.0.attentions.0.norm.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.norm.bias = util.global.load @__auto.up_blocks.0.attentions.0.norm.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.proj_in.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.proj_in.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.proj_in.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.proj_in.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.proj_in.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.proj_in.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.proj_in.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.proj_in.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.proj_in.bias = util.global.load @__auto.up_blocks.0.attentions.0.proj_in.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.0.proj_out.premul_input = util.global.load @__auto.up_blocks.0.attentions.0.proj_out.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.0.proj_out.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.0.proj_out.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.proj_out.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.0.proj_out.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.0.proj_out.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.0.proj_out.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.0.proj_out.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.0.proj_out.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.0.proj_out.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.0.proj_out.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.0.proj_out.bias = util.global.load @__auto.up_blocks.0.attentions.0.proj_out.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.norm1.weight = util.global.load @__auto.up_blocks.0.resnets.1.norm1.weight : tensor<2560xf16>
%__auto.up_blocks.0.resnets.1.norm1.bias = util.global.load @__auto.up_blocks.0.resnets.1.norm1.bias : tensor<2560xf16>
%__auto.up_blocks.0.resnets.1.conv1.premul_input = util.global.load @__auto.up_blocks.0.resnets.1.conv1.premul_input : tensor<1x2560x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.1.conv1.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.1.conv1.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv1.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.1.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv1.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.1.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.1.conv1.weight:qs" : tensor<1280x2560x3x3xi8>
%__auto.up_blocks.0.resnets.1.conv1.bias = util.global.load @__auto.up_blocks.0.resnets.1.conv1.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.time_emb_proj.weight = util.global.load @__auto.up_blocks.0.resnets.1.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.up_blocks.0.resnets.1.time_emb_proj.bias = util.global.load @__auto.up_blocks.0.resnets.1.time_emb_proj.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.norm2.weight = util.global.load @__auto.up_blocks.0.resnets.1.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.norm2.bias = util.global.load @__auto.up_blocks.0.resnets.1.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.conv2.premul_input = util.global.load @__auto.up_blocks.0.resnets.1.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.1.conv2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.1.conv2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv2.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.1.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv2.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.1.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.1.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.up_blocks.0.resnets.1.conv2.bias = util.global.load @__auto.up_blocks.0.resnets.1.conv2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.1.conv_shortcut.premul_input = util.global.load @__auto.up_blocks.0.resnets.1.conv_shortcut.premul_input : tensor<1x2560x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv_shortcut.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.1.conv_shortcut.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv_shortcut.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.1.conv_shortcut.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.1.conv_shortcut.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.1.conv_shortcut.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.1.conv_shortcut.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.1.conv_shortcut.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.1.conv_shortcut.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.1.conv_shortcut.weight:qs" : tensor<1280x2560x1x1xi8>
%__auto.up_blocks.0.resnets.1.conv_shortcut.bias = util.global.load @__auto.up_blocks.0.resnets.1.conv_shortcut.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.norm.weight = util.global.load @__auto.up_blocks.0.attentions.1.norm.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.norm.bias = util.global.load @__auto.up_blocks.0.attentions.1.norm.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.proj_in.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.proj_in.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.proj_in.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.proj_in.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.proj_in.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.proj_in.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.proj_in.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.proj_in.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.proj_in.bias = util.global.load @__auto.up_blocks.0.attentions.1.proj_in.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.1.proj_out.premul_input = util.global.load @__auto.up_blocks.0.attentions.1.proj_out.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.1.proj_out.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.1.proj_out.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.proj_out.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.1.proj_out.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.1.proj_out.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.1.proj_out.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.1.proj_out.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.1.proj_out.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.1.proj_out.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.1.proj_out.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.1.proj_out.bias = util.global.load @__auto.up_blocks.0.attentions.1.proj_out.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.norm1.weight = util.global.load @__auto.up_blocks.0.resnets.2.norm1.weight : tensor<1920xf16>
%__auto.up_blocks.0.resnets.2.norm1.bias = util.global.load @__auto.up_blocks.0.resnets.2.norm1.bias : tensor<1920xf16>
%__auto.up_blocks.0.resnets.2.conv1.premul_input = util.global.load @__auto.up_blocks.0.resnets.2.conv1.premul_input : tensor<1x1920x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv1.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.2.conv1.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv1.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.2.conv1.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv1.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.2.conv1.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv1.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.2.conv1.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.2.conv1.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.2.conv1.weight:qs" : tensor<1280x1920x3x3xi8>
%__auto.up_blocks.0.resnets.2.conv1.bias = util.global.load @__auto.up_blocks.0.resnets.2.conv1.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.time_emb_proj.weight = util.global.load @__auto.up_blocks.0.resnets.2.time_emb_proj.weight : tensor<1280x1280xf16>
%__auto.up_blocks.0.resnets.2.time_emb_proj.bias = util.global.load @__auto.up_blocks.0.resnets.2.time_emb_proj.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.norm2.weight = util.global.load @__auto.up_blocks.0.resnets.2.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.norm2.bias = util.global.load @__auto.up_blocks.0.resnets.2.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.conv2.premul_input = util.global.load @__auto.up_blocks.0.resnets.2.conv2.premul_input : tensor<1x1280x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.2.conv2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.2.conv2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv2.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.2.conv2.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv2.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.2.conv2.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.2.conv2.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.2.conv2.weight:qs" : tensor<1280x1280x3x3xi8>
%__auto.up_blocks.0.resnets.2.conv2.bias = util.global.load @__auto.up_blocks.0.resnets.2.conv2.bias : tensor<1280xf16>
%__auto.up_blocks.0.resnets.2.conv_shortcut.premul_input = util.global.load @__auto.up_blocks.0.resnets.2.conv_shortcut.premul_input : tensor<1x1920x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv_shortcut.q_input3Ascale = util.global.load @"__auto.up_blocks.0.resnets.2.conv_shortcut.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv_shortcut.q_input3Arscale = util.global.load @"__auto.up_blocks.0.resnets.2.conv_shortcut.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.resnets.2.conv_shortcut.weight3Ad = util.global.load @"__auto.up_blocks.0.resnets.2.conv_shortcut.weight:d" : tensor<1280x1x1x1xf16>
%__auto.up_blocks.0.resnets.2.conv_shortcut.weight3Am = util.global.load @"__auto.up_blocks.0.resnets.2.conv_shortcut.weight:m" : tensor<1280x1x1x1xi8>
%__auto.up_blocks.0.resnets.2.conv_shortcut.weight3Aqs = util.global.load @"__auto.up_blocks.0.resnets.2.conv_shortcut.weight:qs" : tensor<1280x1920x1x1xi8>
%__auto.up_blocks.0.resnets.2.conv_shortcut.bias = util.global.load @__auto.up_blocks.0.resnets.2.conv_shortcut.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.norm.weight = util.global.load @__auto.up_blocks.0.attentions.2.norm.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.norm.bias = util.global.load @__auto.up_blocks.0.attentions.2.norm.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.proj_in.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.proj_in.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.proj_in.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.proj_in.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.proj_in.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.proj_in.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.proj_in.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.proj_in.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.proj_in.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.proj_in.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.proj_in.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.proj_in.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.proj_in.bias = util.global.load @__auto.up_blocks.0.attentions.2.proj_in.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.premul_input : tensor<1x1x2048xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight:qs" : tensor<1280x2048xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight:d" : tensor<10240x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight:m" : tensor<10240x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight:qs" : tensor<10240x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.premul_input : tensor<1x1x5120xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight:qs" : tensor<1280x5120xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.q_input:rscale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight3Ad = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight:d" : tensor<1280x1xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight3Am = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight:m" : tensor<1280x1xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight3Aqs = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight:qs" : tensor<1280x1280xi8>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.bias = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.bias : tensor<1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.premul_input = util.global.load @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.premul_input : tensor<1x1x1280xf16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.q_input3Ascale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.q_input:scale" : tensor<f16>
%__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.q_input3Arscale = util.global.load @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.q_in
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment